From f2fc245aebdaeba43cf51f29e3f506979c360174 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 14:10:46 +0200 Subject: [PATCH 01/28] Bump lxml from 4.9.1 to 4.9.3 (#387) --- .github/workflows/ci-cd.yml | 3 ++- composite/action.yml | 2 +- python/requirements-direct.txt | 2 +- python/requirements.txt | 2 +- python/test/test_action_script.py | 14 +++++++------- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index d43dce31..259a1dd6 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -53,7 +53,8 @@ jobs: python-version: "3.11" - os: ubuntu-latest python-version: "3.11" - # installing lxml fails for Python 3.11 on Windows + - os: windows-latest + python-version: "3.11" - os: macos-latest python-version: "3.10" diff --git a/composite/action.yml b/composite/action.yml index dec0448a..ba512f8b 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -192,7 +192,7 @@ runs: continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-d81e5b217e041ea3f958821f6daca2f5 + key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-6ca4d32816ff499719c9ab474caf6c68 - name: Install Python dependencies run: | diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt index b3d52c3e..e0dd2cd2 100644 --- a/python/requirements-direct.txt +++ b/python/requirements-direct.txt @@ -1,6 +1,6 @@ humanize==3.14.0 junitparser==3.1.0 -lxml==4.9.1 +lxml==4.9.3 psutil==5.9.5 PyGithub==2.0.1rc0 requests==2.31.0 diff --git a/python/requirements.txt b/python/requirements.txt index e9d792a9..ecb99aab 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,7 +1,7 @@ humanize==3.14.0 junitparser==3.1.0 future==0.18.3 -lxml==4.9.1 +lxml==4.9.3 psutil==5.9.5 PyGithub==2.0.1rc0 Deprecated==1.2.14 diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index 785fb50a..acb5a129 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -1006,8 +1006,8 @@ def test_parse_files(self): self.assertEqual([], gha.method_calls) self.assertEqual(145, actual.files) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + # on macOS and below Python 3.9 we see one particular error self.assertEqual(17, len(actual.errors)) self.assertEqual(731, actual.suites) self.assertEqual(4109, actual.suite_tests) @@ -1058,7 +1058,7 @@ def test_parse_files(self): '::error file=malformed-json.json::Error processing result file: Unsupported file format: malformed-json.json', '::error file=non-json.json::Error processing result file: Unsupported file format: non-json.json', ] - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): expected.extend([ '::error::lxml.etree.XMLSyntaxError: Failure to process entity xxe, line 17, column 51', '::error file=NUnit-sec1752-file.xml::Error processing result file: Failure to process entity xxe, line 17, column 51 (NUnit-sec1752-file.xml, line 17)', 
@@ -1088,8 +1088,8 @@ def test_parse_files_with_suite_details(self): **options) actual = parse_files(settings, gha) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + # on macOS and Python below 3.9 we see one particular error self.assertEqual(363, len(actual.suite_details)) else: self.assertEqual(365, len(actual.suite_details)) @@ -1171,8 +1171,8 @@ def test_main(self): # Publisher.publish is expected to have been called with these arguments results, cases, conclusion = m.call_args_list[0].args self.assertEqual(145, results.files) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + # on macOS and below Python 3.9 we see one particular error self.assertEqual(731, results.suites) self.assertEqual(731, len(results.suite_details)) self.assertEqual(1811, len(cases)) From 6ddaf27772c896d356998657d62b88e47486cb51 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Thu, 21 Sep 2023 22:42:26 +0200 Subject: [PATCH 02/28] Rework structure of CI workflow (#494) --- .github/actions/test/action.yml | 131 +++++++ .github/workflows/ci-cd.yml | 575 ++---------------------------- .github/workflows/publish.yml | 366 +++++++++++++++++++ .github/workflows/test-os.yml | 31 ++ python/test/test_action_script.py | 20 +- python/test/test_cicd_yml.py | 2 +- 6 files changed, 565 insertions(+), 560 deletions(-) create mode 100644 .github/actions/test/action.yml create mode 100644 .github/workflows/publish.yml create mode 100644 .github/workflows/test-os.yml diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml new file mode 100644 index 00000000..9bf1001c --- /dev/null +++ b/.github/actions/test/action.yml @@ -0,0 +1,131 @@ +name: 'Test' +author: 'EnricoMi' +description: 'A GitHub Action that tests this action' + +inputs: + os: + description: operating system, e.g. ubuntu-22.04 + required: true + python-version: + description: Python version, e.g. 
3.11 + required: true + +runs: + using: 'composite' + steps: + - name: Setup Ubuntu + if: startsWith(inputs.os, 'ubuntu') + run: | + sudo apt-get update + sudo apt-get install language-pack-en language-pack-de + shell: bash + + - name: Setup Python + if: inputs.python-version != 'installed' + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} + + - name: Checkout + uses: actions/checkout@v3 + + - name: Detect OS + id: os + env: + OS: ${{ inputs.os }} + run: | + case "$OS" in + ubuntu*) + echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT + ;; + macos*) + echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT + ;; + windows*) + echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT + ;; + esac + echo "date=$(date +%Y%m%d 2> /dev/null || true)" >> $GITHUB_OUTPUT + shell: bash + + - name: Cache PIP Packages + uses: actions/cache@v3 + id: cache + with: + path: ${{ steps.os.outputs.pip-cache }} + key: ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }} + restore-keys: | + ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}- + ${{ inputs.os }}-pip-test-${{ inputs.python-version }}- + ${{ inputs.os }}-pip-test- + + - name: Install Python dependencies + run: | + python3 -V + python3 -m pip freeze | sort + python3 -m pip cache info || true + python3 -m pip cache list || true + python3 -m pip install --upgrade --force pip wheel + python3 -m pip install --force -r python/requirements.txt + python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt + python3 -m pip freeze | sort + python3 -m pip cache info || true + python3 -m pip cache list || true + shell: bash + + - name: Update expectation files + id: changes + continue-on-error: true + run: | + python/test/files/update_expectations.sh + git status + + if ! git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]] + then + zip changes.zip $(git diff --name-only) $(git ls-files -o --exclude-standard) + exit 1 + fi + shell: bash + - name: Upload changed expectation files + if: steps.changes.outcome == 'failure' + uses: actions/upload-artifact@v3 + with: + name: Changed expectations + path: changed-expectations.zip + + - name: PyTest + env: + PYTHONPATH: .. + run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml + shell: bash + + - name: PyTest (EST) + env: + TZ: US/Eastern + LANG: "en_US.UTF-8" + PYTHONPATH: .. + run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml + shell: bash + + - name: PyTest (CET) + env: + TZ: Europe/Berlin + LANG: "de_DE.UTF-8" + PYTHONPATH: .. 
+ run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml + shell: bash + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v3 + with: + name: Test Results (python-${{ inputs.python-version }}, ${{ inputs.os }}) + path: | + test-results/*.xml + unit-test-results.json diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 259a1dd6..1934841e 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -30,568 +30,41 @@ jobs: .github/upgrade-pip-packages.sh shell: bash - test: - name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }}) - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: - - macos-11 - - macos-12 - - macos-latest - - ubuntu-20.04 - - ubuntu-22.04 - - ubuntu-latest - - windows-2019 - - windows-2022 - - windows-latest - python-version: ["3.8", "installed"] - - include: - - os: macos-latest - python-version: "3.11" - - os: ubuntu-latest - python-version: "3.11" - - os: windows-latest - python-version: "3.11" - - - os: macos-latest - python-version: "3.10" - - os: ubuntu-latest - python-version: "3.10" - - os: windows-latest - python-version: "3.10" - - - os: macos-latest - python-version: "3.9" - - os: ubuntu-latest - python-version: "3.9" - - os: windows-latest - python-version: "3.9" - - - os: macos-latest - python-version: "3.7" - - os: ubuntu-latest - python-version: "3.7" - - os: windows-latest - python-version: "3.7" - - - steps: - - name: Setup Ubuntu - if: startsWith(matrix.os, 'ubuntu') - run: | - sudo apt-get update - sudo apt-get install language-pack-en language-pack-de - shell: bash - - - name: Setup Python - if: matrix.python-version != 'installed' - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Checkout - uses: actions/checkout@v3 - - - name: Detect OS - id: os - env: - OS: ${{ matrix.os }} - run: | - case "$OS" in - ubuntu*) - echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT - ;; - macos*) - echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT - ;; - windows*) - echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT - ;; - esac - echo "date=$(date +%Y%m%d 2> /dev/null || true)" >> $GITHUB_OUTPUT - shell: bash - - - name: Cache PIP Packages - uses: actions/cache@v3 - id: cache - with: - path: ${{ steps.os.outputs.pip-cache }} - key: ${{ matrix.os }}-pip-test-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }} - restore-keys: | - ${{ matrix.os }}-pip-test-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}- - ${{ matrix.os }}-pip-test-${{ matrix.python-version }}- - ${{ matrix.os }}-pip-test- - - - name: Install Python dependencies - run: | - python3 -V - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true - python3 -m pip install --upgrade --force pip wheel - python3 -m pip install --force -r python/requirements.txt - python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true - shell: bash - - - name: Update expectation files - id: changes - continue-on-error: true - run: | - python/test/files/update_expectations.sh - git status - - if ! 
git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]] - then - zip changes.zip $(git diff --name-only) $(git ls-files -o --exclude-standard) - exit 1 - fi - shell: bash - - name: Upload changed expectation files - if: steps.changes.outcome == 'failure' - uses: actions/upload-artifact@v3 - with: - name: Changed expectations - path: changed-expectations.zip - - - name: PyTest - env: - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml - shell: bash - - - name: PyTest (EST) - env: - TZ: US/Eastern - LANG: "en_US.UTF-8" - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml - shell: bash - - - name: PyTest (CET) - env: - TZ: Europe/Berlin - LANG: "de_DE.UTF-8" - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml - shell: bash - - - name: Upload Test Results - if: always() - uses: actions/upload-artifact@v3 - with: - name: Test Results (python-${{ matrix.python-version }}, ${{ matrix.os }}) - path: | - test-results/*.xml - unit-test-results.json - - publish-dockerfile: - name: Publish Test Results (Dockerfile) - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: ./ - with: - check_name: Test Results (Dockerfile) - files: "artifacts/**/*.xml" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-docker-image: - name: Publish Test Results (Docker Image) - needs: test + test-mac: + name: "Test macOS" + uses: "./.github/workflows/test-os.yml" + with: + os: '["macos-11", "macos-12", "macos-13"]' + + test-lnx: + name: "Test Ubuntu" + uses: "./.github/workflows/test-os.yml" + with: + os: '["ubuntu-20.04", "ubuntu-22.04"]' + + test-win: + name: "Test Windows" + uses: "./.github/workflows/test-os.yml" + with: + os: '["windows-2019", "windows-2022"]' + + publish: + name: "Publish" + needs: [test-mac, test-lnx, test-win] # we run the action from this branch whenever we can (when it runs in our repo's context) if: > always() && github.event.sender.login != 'dependabot[bot]' && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest + uses: "./.github/workflows/publish.yml" permissions: checks: write pull-requests: write security-events: write - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: 
docker/setup-buildx-action@v2 - - - name: Build Docker image - id: build - uses: docker/build-push-action@v4 - with: - load: true - push: false - tags: enricomi/publish-unit-test-result-action:latest - outputs: type=docker - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Publish Test Results - id: test-results - if: always() - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - INPUT_CHECK_NAME: Test Results (Docker Image) - INPUT_FILES: "artifacts/**/*.xml" - INPUT_JSON_FILE: "tests.json" - INPUT_JSON_SUITE_DETAILS: true - INPUT_JSON_TEST_CASE_RESULTS: true - INPUT_REPORT_SUITE_LOGS: "any" - run: | - docker run \ - --workdir $GITHUB_WORKSPACE \ - --rm \ - -e "INPUT_CHECK_NAME" \ - -e "INPUT_JSON_FILE" \ - -e "INPUT_JSON_SUITE_DETAILS" \ - -e "INPUT_JSON_TEST_CASE_RESULTS" \ - -e "INPUT_LOG_LEVEL" \ - -e "INPUT_ROOT_LOG_LEVEL" \ - -e "INPUT_GITHUB_TOKEN" \ - -e "INPUT_GITHUB_TOKEN_ACTOR" \ - -e "INPUT_GITHUB_RETRIES" \ - -e "INPUT_COMMIT" \ - -e "INPUT_COMMENT_TITLE" \ - -e "INPUT_COMMENT_MODE" \ - -e "INPUT_FAIL_ON" \ - -e "INPUT_ACTION_FAIL" \ - -e "INPUT_ACTION_FAIL_ON_INCONCLUSIVE" \ - -e "INPUT_FILES" \ - -e "INPUT_JUNIT_FILES" \ - -e "INPUT_NUNIT_FILES" \ - -e "INPUT_XUNIT_FILES" \ - -e "INPUT_TRX_FILES" \ - -e "INPUT_TIME_UNIT" \ - -e "INPUT_REPORT_INDIVIDUAL_RUNS" \ - -e "INPUT_REPORT_SUITE_LOGS" \ - -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ - -e "INPUT_LARGE_FILES" \ - -e "INPUT_IGNORE_RUNS" \ - -e "INPUT_JOB_SUMMARY" \ - -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ - -e "INPUT_PULL_REQUEST_BUILD" \ - -e "INPUT_EVENT_FILE" \ - -e "INPUT_EVENT_NAME" \ - -e "INPUT_TEST_CHANGES_LIMIT" \ - -e "INPUT_CHECK_RUN_ANNOTATIONS" \ - -e "INPUT_CHECK_RUN_ANNOTATIONS_BRANCH" \ - -e "INPUT_SECONDS_BETWEEN_GITHUB_READS" \ - -e "INPUT_SECONDS_BETWEEN_GITHUB_WRITES" \ - -e "INPUT_SECONDARY_RATE_LIMIT_WAIT_SECONDS" \ - -e "INPUT_JSON_THOUSANDS_SEPARATOR" \ - -e "INPUT_SEARCH_PULL_REQUESTS" \ - -e "HOME" \ - -e "GITHUB_JOB" \ - -e "GITHUB_REF" \ - -e "GITHUB_SHA" \ - -e "GITHUB_REPOSITORY" \ - -e "GITHUB_REPOSITORY_OWNER" \ - -e "GITHUB_RUN_ID" \ - -e "GITHUB_RUN_NUMBER" \ - -e "GITHUB_RETENTION_DAYS" \ - -e "GITHUB_RUN_ATTEMPT" \ - -e "GITHUB_ACTOR" \ - -e "GITHUB_TRIGGERING_ACTOR" \ - -e "GITHUB_WORKFLOW" \ - -e "GITHUB_HEAD_REF" \ - -e "GITHUB_BASE_REF" \ - -e "GITHUB_EVENT_NAME" \ - -e "GITHUB_SERVER_URL" \ - -e "GITHUB_API_URL" \ - -e "GITHUB_GRAPHQL_URL" \ - -e "GITHUB_REF_NAME" \ - -e "GITHUB_REF_PROTECTED" \ - -e "GITHUB_REF_TYPE" \ - -e "GITHUB_WORKSPACE" \ - -e "GITHUB_ACTION" \ - -e "GITHUB_EVENT_PATH" \ - -e "GITHUB_ACTION_REPOSITORY" \ - -e "GITHUB_ACTION_REF" \ - -e "GITHUB_PATH" \ - -e "GITHUB_ENV" \ - -e "GITHUB_STEP_SUMMARY" \ - -e "GITHUB_STATE" \ - -e "GITHUB_OUTPUT" \ - -e "RUNNER_OS" \ - -e "RUNNER_ARCH" \ - -e "RUNNER_NAME" \ - -e "RUNNER_TOOL_CACHE" \ - -e "RUNNER_TEMP" \ - -e "RUNNER_WORKSPACE" \ - -e "ACTIONS_RUNTIME_URL" \ - -e "ACTIONS_RUNTIME_TOKEN" \ - -e "ACTIONS_CACHE_URL" \ - -e GITHUB_ACTIONS=true \ - -e CI=true \ - -v "$RUNNER_TEMP":"$RUNNER_TEMP" \ - -v "/var/run/docker.sock":"/var/run/docker.sock" \ - -v "/home/runner/work/_temp/_github_home":"/github/home" \ - -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ - -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ - -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ - enricomi/publish-unit-test-result-action:latest - shell: bash - - - name: JSON output - uses: 
./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - - name: Scan for vulnerabilities - id: scan - uses: crazy-max/ghaction-container-scan@v2 - with: - image: enricomi/publish-unit-test-result-action:latest - dockerfile: ./Dockerfile - annotations: true - - name: Upload SARIF artifact - uses: actions/upload-artifact@v3 - with: - name: SARIF - path: ${{ steps.scan.outputs.sarif }} - - name: Upload SARIF file - if: always() && steps.scan.outputs.sarif != '' - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: ${{ steps.scan.outputs.sarif }} - - publish-composite: - name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ${{ matrix.os }} - permissions: - checks: write - pull-requests: write - - strategy: - fail-fast: false - max-parallel: 3 - matrix: - # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources - # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) - include: - - os: macos-latest - os-label: macOS - python: "3.8" - - os: macos-latest - os-label: macOS - python: "installed" - - os: macos-11 - os-label: macOS 11 - python: "installed" - - - os: ubuntu-latest - os-label: Linux - python: "3.8" - - os: ubuntu-latest - os-label: Linux - python: "installed" - - os: ubuntu-20.04 - os-label: Linux 20.04 - python: "installed" - - - os: windows-latest - os-label: Windows - python: "installed" - - os: windows-2019 - os-label: Windows 2019 - python: "installed" - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Setup Python - if: matrix.python != 'installed' - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python }} - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Publish Test Results - id: test-results - uses: ./composite - with: - check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) - files: | - artifacts/**/*.xml - artifacts\**\*.xml - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-test-files: - name: Publish Test Files - # does not really depend on 'tests' but can be executed together with other publish tasks just for good taste - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Copy test result files - run: cp -rv python/test/files test-files - shell: bash - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: 
./ - with: - check_name: Test Results (Test Files) - fail_on: nothing - files: | - test-files/**/*.xml - test-files/**/*.trx - test-files/**/*.json - junit_files: "test-files/junit-xml/**/*.xml" - nunit_files: "test-files/nunit/**/*.xml" - xunit_files: "test-files/xunit/**/*.xml" - trx_files: "test-files/trx/**/*.trx" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-test-file: - name: Publish Test File - # does not really depend on 'tests' but can be executed together with other publish tasks just for good taste - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Copy test junit xml files - run: cp -rv python/test/files/junit-xml test-files - shell: bash - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: ./ - with: - check_name: Test Results (Test File) - fail_on: nothing - files: "test-files/pytest/junit.gloo.standalone.xml" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - config-deploy: name: Configure Deployment - needs: test + needs: [test-mac, test-lnx, test-win] # do not build or deploy on forked repositories if: github.repository_owner == 'EnricoMi' runs-on: ubuntu-latest @@ -628,7 +101,7 @@ jobs: deploy: name: Deploy to GitHub - needs: [test, publish-dockerfile, publish-docker-image, publish-composite, publish-test-file, publish-test-files, config-deploy] + needs: [publish, config-deploy] # do not build or deploy on forked repositories if: github.repository_owner == 'EnricoMi' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..5272369c --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,366 @@ +name: Publish + +on: + workflow_call: + +jobs: + publish-dockerfile: + name: Publish Test Results (Dockerfile) + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Dockerfile) + files: "artifacts/**/*.xml" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-docker-image: + name: Publish Test Results (Docker Image) 
+ runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + security-events: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build Docker image + id: build + uses: docker/build-push-action@v4 + with: + load: true + push: false + tags: enricomi/publish-unit-test-result-action:latest + outputs: type=docker + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + if: always() + env: + INPUT_GITHUB_TOKEN: ${{ github.token }} + INPUT_CHECK_NAME: Test Results (Docker Image) + INPUT_FILES: "artifacts/**/*.xml" + INPUT_JSON_FILE: "tests.json" + INPUT_JSON_SUITE_DETAILS: true + INPUT_JSON_TEST_CASE_RESULTS: true + INPUT_REPORT_SUITE_LOGS: "any" + run: | + docker run \ + --workdir $GITHUB_WORKSPACE \ + --rm \ + -e "INPUT_CHECK_NAME" \ + -e "INPUT_JSON_FILE" \ + -e "INPUT_JSON_SUITE_DETAILS" \ + -e "INPUT_JSON_TEST_CASE_RESULTS" \ + -e "INPUT_LOG_LEVEL" \ + -e "INPUT_ROOT_LOG_LEVEL" \ + -e "INPUT_GITHUB_TOKEN" \ + -e "INPUT_GITHUB_TOKEN_ACTOR" \ + -e "INPUT_GITHUB_RETRIES" \ + -e "INPUT_COMMIT" \ + -e "INPUT_COMMENT_TITLE" \ + -e "INPUT_COMMENT_MODE" \ + -e "INPUT_FAIL_ON" \ + -e "INPUT_ACTION_FAIL" \ + -e "INPUT_ACTION_FAIL_ON_INCONCLUSIVE" \ + -e "INPUT_FILES" \ + -e "INPUT_JUNIT_FILES" \ + -e "INPUT_NUNIT_FILES" \ + -e "INPUT_XUNIT_FILES" \ + -e "INPUT_TRX_FILES" \ + -e "INPUT_TIME_UNIT" \ + -e "INPUT_REPORT_INDIVIDUAL_RUNS" \ + -e "INPUT_REPORT_SUITE_LOGS" \ + -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ + -e "INPUT_LARGE_FILES" \ + -e "INPUT_IGNORE_RUNS" \ + -e "INPUT_JOB_SUMMARY" \ + -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ + -e "INPUT_PULL_REQUEST_BUILD" \ + -e "INPUT_EVENT_FILE" \ + -e "INPUT_EVENT_NAME" \ + -e "INPUT_TEST_CHANGES_LIMIT" \ + -e "INPUT_CHECK_RUN_ANNOTATIONS" \ + -e "INPUT_CHECK_RUN_ANNOTATIONS_BRANCH" \ + -e "INPUT_SECONDS_BETWEEN_GITHUB_READS" \ + -e "INPUT_SECONDS_BETWEEN_GITHUB_WRITES" \ + -e "INPUT_SECONDARY_RATE_LIMIT_WAIT_SECONDS" \ + -e "INPUT_JSON_THOUSANDS_SEPARATOR" \ + -e "INPUT_SEARCH_PULL_REQUESTS" \ + -e "HOME" \ + -e "GITHUB_JOB" \ + -e "GITHUB_REF" \ + -e "GITHUB_SHA" \ + -e "GITHUB_REPOSITORY" \ + -e "GITHUB_REPOSITORY_OWNER" \ + -e "GITHUB_RUN_ID" \ + -e "GITHUB_RUN_NUMBER" \ + -e "GITHUB_RETENTION_DAYS" \ + -e "GITHUB_RUN_ATTEMPT" \ + -e "GITHUB_ACTOR" \ + -e "GITHUB_TRIGGERING_ACTOR" \ + -e "GITHUB_WORKFLOW" \ + -e "GITHUB_HEAD_REF" \ + -e "GITHUB_BASE_REF" \ + -e "GITHUB_EVENT_NAME" \ + -e "GITHUB_SERVER_URL" \ + -e "GITHUB_API_URL" \ + -e "GITHUB_GRAPHQL_URL" \ + -e "GITHUB_REF_NAME" \ + -e "GITHUB_REF_PROTECTED" \ + -e "GITHUB_REF_TYPE" \ + -e "GITHUB_WORKSPACE" \ + -e "GITHUB_ACTION" \ + -e "GITHUB_EVENT_PATH" \ + -e "GITHUB_ACTION_REPOSITORY" \ + -e "GITHUB_ACTION_REF" \ + -e "GITHUB_PATH" \ + -e "GITHUB_ENV" \ + -e "GITHUB_STEP_SUMMARY" \ + -e "GITHUB_STATE" \ + -e "GITHUB_OUTPUT" \ + -e "RUNNER_OS" \ + -e "RUNNER_ARCH" \ + -e "RUNNER_NAME" \ + -e "RUNNER_TOOL_CACHE" \ + -e "RUNNER_TEMP" \ + -e "RUNNER_WORKSPACE" \ + -e "ACTIONS_RUNTIME_URL" \ + -e "ACTIONS_RUNTIME_TOKEN" \ + -e "ACTIONS_CACHE_URL" \ + -e GITHUB_ACTIONS=true \ + -e CI=true \ + -v "$RUNNER_TEMP":"$RUNNER_TEMP" \ + -v "/var/run/docker.sock":"/var/run/docker.sock" \ + -v "/home/runner/work/_temp/_github_home":"/github/home" \ + -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ + -v 
"/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ + -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ + enricomi/publish-unit-test-result-action:latest + shell: bash + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + - name: Scan for vulnerabilities + id: scan + uses: crazy-max/ghaction-container-scan@v2 + with: + image: enricomi/publish-unit-test-result-action:latest + dockerfile: ./Dockerfile + annotations: true + - name: Upload SARIF artifact + uses: actions/upload-artifact@v3 + with: + name: SARIF + path: ${{ steps.scan.outputs.sarif }} + - name: Upload SARIF file + if: always() && steps.scan.outputs.sarif != '' + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: ${{ steps.scan.outputs.sarif }} + + publish-composite: + name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + runs-on: ${{ matrix.os }} + permissions: + checks: write + pull-requests: write + + strategy: + fail-fast: false + max-parallel: 3 + matrix: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources + # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) + include: + - os: macos-latest + os-label: macOS + python: "3.8" + - os: macos-latest + os-label: macOS + python: "installed" + - os: macos-11 + os-label: macOS 11 + python: "installed" + + - os: ubuntu-latest + os-label: Linux + python: "3.8" + - os: ubuntu-latest + os-label: Linux + python: "installed" + - os: ubuntu-20.04 + os-label: Linux 20.04 + python: "installed" + + - os: windows-latest + os-label: Windows + python: "installed" + - os: windows-2019 + os-label: Windows 2019 + python: "installed" + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup Python + if: matrix.python != 'installed' + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + uses: ./composite + with: + check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + files: | + artifacts/**/*.xml + artifacts\**\*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-test-files: + name: Publish Test Files + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Copy test result files + run: cp -rv python/test/files test-files + shell: bash + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Test Files) + fail_on: nothing + files: | + test-files/**/*.xml + test-files/**/*.trx + test-files/**/*.json + junit_files: "test-files/junit-xml/**/*.xml" + nunit_files: "test-files/nunit/**/*.xml" + xunit_files: "test-files/xunit/**/*.xml" + trx_files: "test-files/trx/**/*.trx" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + 
report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-test-file: + name: Publish Test File + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Copy test junit xml files + run: cp -rv python/test/files/junit-xml test-files + shell: bash + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Test File) + fail_on: nothing + files: "test-files/pytest/junit.gloo.standalone.xml" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' diff --git a/.github/workflows/test-os.yml b/.github/workflows/test-os.yml new file mode 100644 index 00000000..9f90a0c0 --- /dev/null +++ b/.github/workflows/test-os.yml @@ -0,0 +1,31 @@ +name: Test OS + +on: + workflow_call: + inputs: + os: + required: true + type: string +jobs: + test: + name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: ${{ fromJson(inputs.os) }} + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12.0-rc.3", "installed"] + + include: + - os: ${{ fromJson(inputs.os)[0] }} + python-version: "3.7" + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Test + uses: ./.github/actions/test + with: + os: ${{ matrix.os }} + python-version: ${{ matrix.python-version }} diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index acb5a129..cf595c13 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -1,8 +1,8 @@ import io import json -import logging import os import pathlib +import platform import re import sys import tempfile @@ -13,8 +13,8 @@ from packaging.version import Version from publish import __version__, pull_request_build_mode_merge, fail_on_mode_failures, fail_on_mode_errors, \ - fail_on_mode_nothing, comment_modes, comment_mode_always, comment_mode_off, \ - report_suite_out_log, report_suite_err_log, report_suite_logs, report_no_suite_logs, default_report_suite_logs, \ + fail_on_mode_nothing, comment_modes, comment_mode_always, report_suite_out_log, report_suite_err_log, \ + report_suite_logs, report_no_suite_logs, default_report_suite_logs, \ default_annotations, all_tests_list, skipped_tests_list, none_annotations, \ pull_request_build_modes, punctuation_space from publish.github_action import GithubAction @@ -1006,7 +1006,8 @@ def test_parse_files(self): self.assertEqual([], gha.method_calls) self.assertEqual(145, actual.files) - if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): # on macOS and below Python 3.9 we see one particular error self.assertEqual(17, len(actual.errors)) self.assertEqual(731, actual.suites) @@ -1058,7 +1059,8 @@ def test_parse_files(self): '::error file=malformed-json.json::Error 
processing result file: Unsupported file format: malformed-json.json', '::error file=non-json.json::Error processing result file: Unsupported file format: non-json.json', ] - if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): expected.extend([ '::error::lxml.etree.XMLSyntaxError: Failure to process entity xxe, line 17, column 51', '::error file=NUnit-sec1752-file.xml::Error processing result file: Failure to process entity xxe, line 17, column 51 (NUnit-sec1752-file.xml, line 17)', @@ -1088,8 +1090,9 @@ def test_parse_files_with_suite_details(self): **options) actual = parse_files(settings, gha) - if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): - # on macOS and Python below 3.9 we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): + # on macOS (below macOS 13) and Python below 3.9 we see one particular error self.assertEqual(363, len(actual.suite_details)) else: self.assertEqual(365, len(actual.suite_details)) @@ -1171,7 +1174,8 @@ def test_main(self): # Publisher.publish is expected to have been called with these arguments results, cases, conclusion = m.call_args_list[0].args self.assertEqual(145, results.files) - if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin'): + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): # on macOS and below Python 3.9 we see one particular error self.assertEqual(731, results.suites) self.assertEqual(731, len(results.suite_details)) diff --git a/python/test/test_cicd_yml.py b/python/test/test_cicd_yml.py index 47bbd2c5..6e695db6 100644 --- a/python/test/test_cicd_yml.py +++ b/python/test/test_cicd_yml.py @@ -12,7 +12,7 @@ def test_cicd_workflow(self): with open(project_root / 'action.yml', encoding='utf-8') as r: action = yaml.safe_load(r) - with open(project_root / '.github/workflows/ci-cd.yml', encoding='utf-8') as r: + with open(project_root / '.github/workflows/publish.yml', encoding='utf-8') as r: cicd = yaml.safe_load(r) docker_image_steps = cicd.get('jobs', []).get('publish-docker-image', {}).get('steps', []) From bd22544d04dc502afa372b8b79163ea08885f063 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 22 Sep 2023 08:25:57 +0200 Subject: [PATCH 03/28] Allow for adding or removing test file path prefix (#495) --- .github/workflows/publish.yml | 1 + README.md | 8 ++++-- action.yml | 4 ++- composite/action.yml | 5 ++++ python/publish/junit.py | 21 ++++++++++++-- python/publish/publisher.py | 1 + python/publish_test_results.py | 8 +++++- python/test/test_action_script.py | 14 ++++++++- python/test/test_junit.py | 47 +++++++++++++++++++++++++++++-- python/test/test_publisher.py | 3 +- 10 files changed, 101 insertions(+), 11 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5272369c..37e9f055 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -111,6 +111,7 @@ jobs: -e "INPUT_XUNIT_FILES" \ -e "INPUT_TRX_FILES" \ -e "INPUT_TIME_UNIT" \ + -e 
"INPUT_TEST_FILE_PREFIX" \ -e "INPUT_REPORT_INDIVIDUAL_RUNS" \ -e "INPUT_REPORT_SUITE_LOGS" \ -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ diff --git a/README.md b/README.md index a302dbc3..fe161e2f 100644 --- a/README.md +++ b/README.md @@ -208,6 +208,9 @@ and the changed files section of related pull requests: ![annotations example changed files](misc/github-pull-request-changes-annotation.png) +***Note:** Annotations for test files are only supported when test file paths in test result files are relative to the repository root. +Use option `test_file_prefix` to add a prefix to, or remove a prefix from these file paths. See [Configuration](#configuration) section for details.* + ***Note:** Only the first failure of a test is shown. If you want to see all failures, set `report_individual_runs: "true"`.* ### GitHub Actions job summary @@ -290,8 +293,9 @@ The list of most notable options: |Option|Default Value|Description| |:-----|:-----:|:----------| -|`time_unit`|`seconds`|Time values in the XML files have this unit. Supports `seconds` and `milliseconds`.| -|`job_summary`|`true`| Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| +|`time_unit`|`seconds`|Time values in the test result files have this unit. Supports `seconds` and `milliseconds`.| +|`test_file_prefix`|`none`|Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".| +|`job_summary`|`true`|Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| |`compare_to_earlier_commit`|`true`|Test results are compared to results of earlier commits to show changes:
`false` - disable comparison, `true` - compare across commits.'| |`test_changes_limit`|`10`|Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of `0`.| |`report_individual_runs`|`false`|Individual runs of the same test may see different failures. Reports all individual failures when set `true`, and the first failure only otherwise.| diff --git a/action.yml b/action.yml index 2bcf081a..145c24ee 100644 --- a/action.yml +++ b/action.yml @@ -60,6 +60,9 @@ inputs: description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' default: 'seconds' required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false report_individual_runs: description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' required: false @@ -135,7 +138,6 @@ inputs: description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' default: 'false' required: false - outputs: json: description: "Test results as JSON" diff --git a/composite/action.yml b/composite/action.yml index ba512f8b..dfdb0df9 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -60,6 +60,9 @@ inputs: description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' default: 'seconds' required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false report_individual_runs: description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' required: false @@ -135,6 +138,7 @@ inputs: description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' 
default: 'false' required: false + outputs: json: description: "Test results as JSON" @@ -226,6 +230,7 @@ runs: XUNIT_FILES: ${{ inputs.xunit_files }} TRX_FILES: ${{ inputs.trx_files }} TIME_UNIT: ${{ inputs.time_unit }} + TEST_FILE_PREFIX: ${{ inputs.test_file_prefix }} REPORT_INDIVIDUAL_RUNS: ${{ inputs.report_individual_runs }} REPORT_SUITE_LOGS: ${{ inputs.report_suite_logs }} DEDUPLICATE_CLASSES_BY_FILE_NAME: ${{ inputs.deduplicate_classes_by_file_name }} diff --git a/python/publish/junit.py b/python/publish/junit.py index c300b434..79a348c1 100644 --- a/python/publish/junit.py +++ b/python/publish/junit.py @@ -175,7 +175,24 @@ def parse(path: str) -> JUnitTree: return progress_safe_parse_xml_file(files, parse, progress) -def process_junit_xml_elems(trees: Iterable[ParsedJUnitFile], time_factor: float = 1.0, add_suite_details: bool = False) -> ParsedUnitTestResults: +def adjust_prefix(file: Optional[str], prefix: Optional[str]) -> Optional[str]: + if prefix is None or file is None: + return file + + # prefix starts either with '+' or '-' + if prefix.startswith('+'): + # add prefix + return "".join([prefix[1:], file]) + + # remove prefix + return file[len(prefix)-1:] if file.startswith(prefix[1:]) else file + + +def process_junit_xml_elems(trees: Iterable[ParsedJUnitFile], + *, + time_factor: float = 1.0, + test_file_prefix: Optional[str] = None, + add_suite_details: bool = False) -> ParsedUnitTestResults: def create_junitxml(filepath: str, tree: JUnitTree) -> JUnitXmlOrParseError: try: instance = JUnitXml.fromroot(tree.getroot()) @@ -265,7 +282,7 @@ def get_text(elem, tag): cases = [ UnitTestCase( result_file=result_file, - test_file=case._elem.get('file'), + test_file=adjust_prefix(case._elem.get('file'), test_file_prefix), line=int_opt(case._elem.get('line')), class_name=case.classname, test_name=case.name, diff --git a/python/publish/publisher.py b/python/publish/publisher.py index d28ecd71..5fdfb4e8 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -54,6 +54,7 @@ class Settings: nunit_files_glob: Optional[str] xunit_files_glob: Optional[str] trx_files_glob: Optional[str] + test_file_prefix: Optional[str] time_factor: float check_name: str comment_title: str diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 537f858c..6dee1746 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -208,6 +208,7 @@ def parse_files(settings: Settings, gha: GithubAction) -> ParsedUnitTestResultsW return process_junit_xml_elems( elems, time_factor=settings.time_factor, + test_file_prefix=settings.test_file_prefix, add_suite_details=settings.report_suite_out_logs or settings.report_suite_err_logs or settings.json_suite_details ).with_commit(settings.commit) @@ -464,6 +465,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: xunit_files_glob=get_var('XUNIT_FILES', options), trx_files_glob=get_var('TRX_FILES', options), time_factor=time_factor, + test_file_prefix=get_var('TEST_FILE_PREFIX', options) or None, check_name=check_name, comment_title=get_var('COMMENT_TITLE', options) or check_name, comment_mode=comment_mode, @@ -481,12 +483,16 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: seconds_between_github_reads=float(seconds_between_github_reads), seconds_between_github_writes=float(seconds_between_github_writes), secondary_rate_limit_wait_seconds=float(secondary_rate_limit_wait_seconds), - search_pull_requests=get_bool_var('SEARCH_PULL_REQUESTS', options, default=False) + 
search_pull_requests=get_bool_var('SEARCH_PULL_REQUESTS', options, default=False), ) check_var(settings.token, 'GITHUB_TOKEN', 'GitHub token') check_var(settings.repo, 'GITHUB_REPOSITORY', 'GitHub repository') check_var(settings.commit, 'COMMIT, GITHUB_SHA or event file', 'Commit SHA') + check_var_condition( + settings.test_file_prefix is None or any([settings.test_file_prefix.startswith(sign) for sign in ['-', '+']]), + f"TEST_FILE_PREFIX is optional, but when given, it must start with '-' or '+': {settings.test_file_prefix}" + ) check_var(settings.comment_mode, 'COMMENT_MODE', 'Comment mode', comment_modes) check_var(settings.pull_request_build, 'PULL_REQUEST_BUILD', 'Pull Request build', pull_request_build_modes) check_var(suite_logs_mode, 'REPORT_SUITE_LOGS', 'Report suite logs mode', available_report_suite_logs) diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index cf595c13..d0929f8c 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -180,6 +180,7 @@ def get_settings(token='token', xunit_files_glob='xunit-files', trx_files_glob='trx-files', time_factor=1.0, + test_file_prefix=None, check_name='check name', comment_title='title', comment_mode=comment_mode_always, @@ -228,6 +229,7 @@ def get_settings(token='token', xunit_files_glob=xunit_files_glob, trx_files_glob=trx_files_glob, time_factor=time_factor, + test_file_prefix=test_file_prefix, check_name=check_name, comment_title=comment_title, comment_mode=comment_mode, @@ -245,7 +247,7 @@ def get_settings(token='token', seconds_between_github_reads=seconds_between_github_reads, seconds_between_github_writes=seconds_between_github_writes, secondary_rate_limit_wait_seconds=secondary_rate_limit_wait_seconds, - search_pull_requests=search_pull_requests + search_pull_requests=search_pull_requests, ) def test_get_settings(self): @@ -353,6 +355,16 @@ def test_get_settings_time_unit(self): self.assertIn('TIME_UNIT minutes is not supported. 
It is optional, ' 'but when given must be one of these values: seconds, milliseconds', re.exception.args) + def test_get_settings_test_file_prefix(self): + self.do_test_get_settings(TEST_FILE_PREFIX=None, expected=self.get_settings(test_file_prefix=None)) + self.do_test_get_settings(TEST_FILE_PREFIX='', expected=self.get_settings(test_file_prefix=None)) + self.do_test_get_settings(TEST_FILE_PREFIX='+src/', expected=self.get_settings(test_file_prefix='+src/')) + self.do_test_get_settings(TEST_FILE_PREFIX='-./', expected=self.get_settings(test_file_prefix='-./')) + + with self.assertRaises(RuntimeError) as re: + self.do_test_get_settings(TEST_FILE_PREFIX='path/', expected=None) + self.assertIn("TEST_FILE_PREFIX is optional, but when given, it must start with '-' or '+': path/", re.exception.args) + def test_get_settings_commit(self): event = {'pull_request': {'head': {'sha': 'sha2'}}} self.do_test_get_settings(INPUT_COMMIT='sha', GITHUB_EVENT_NAME='pull_request', event=event, GITHUB_SHA='default', expected=self.get_settings(commit='sha', event=event, event_name='pull_request', is_fork=True)) diff --git a/python/test/test_junit.py b/python/test/test_junit.py index d71600c7..3cf60214 100644 --- a/python/test/test_junit.py +++ b/python/test/test_junit.py @@ -19,8 +19,8 @@ sys.path.append(str(pathlib.Path(__file__).resolve().parent)) from publish import __version__, available_annotations, none_annotations -from publish.junit import is_junit, parse_junit_xml_files, process_junit_xml_elems, get_results, get_result, get_content, \ - get_message, Disabled, JUnitTreeOrParseError, ParseError +from publish.junit import is_junit, parse_junit_xml_files, adjust_prefix, process_junit_xml_elems, get_results, \ + get_result, get_content, get_message, Disabled, JUnitTreeOrParseError, ParseError from publish.unittestresults import ParsedUnitTestResults, UnitTestCase from publish_test_results import get_test_results, get_stats, get_conclusion from publish.publisher import Publisher @@ -97,6 +97,21 @@ def shorten_filename(cls, filename, prefix=None): else: return filename + def test_adjust_prefix(self): + self.assertEqual(adjust_prefix("file", "+"), "file") + self.assertEqual(adjust_prefix("file", "+."), ".file") + self.assertEqual(adjust_prefix("file", "+./"), "./file") + self.assertEqual(adjust_prefix("file", "+path/"), "path/file") + + self.assertEqual(adjust_prefix("file", "-"), "file") + self.assertEqual(adjust_prefix(".file", "-."), "file") + self.assertEqual(adjust_prefix("./file", "-./"), "file") + self.assertEqual(adjust_prefix("path/file", "-path/"), "file") + self.assertEqual(adjust_prefix("file", "-"), "file") + self.assertEqual(adjust_prefix("file", "-."), "file") + self.assertEqual(adjust_prefix("file", "-./"), "file") + self.assertEqual(adjust_prefix("file", "-path/"), "file") + def do_test_parse_and_process_files(self, filename: str): for locale in [None, 'en_US.UTF-8', 'de_DE.UTF-8']: with self.test.subTest(file=self.shorten_filename(filename), locale=locale): @@ -299,7 +314,7 @@ def test_process_parse_junit_xml_files_with_time_factor(self): for time_factor in [1.0, 10.0, 60.0, 0.1, 0.001]: with self.subTest(time_factor=time_factor): self.assertEqual( - process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), time_factor), + process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), time_factor=time_factor), ParsedUnitTestResults( files=1, errors=[], @@ -379,6 +394,32 @@ def test_process_parse_junit_xml_files_with_time_factor(self): ] )) + def 
test_process_parse_junit_xml_files_with_test_file_prefix(self): + result_file = str(test_files_path / 'pytest' / 'junit.fail.xml') + for prefix in ["+python/", "-test/", "-src"]: + with self.subTest(prefix=prefix): + test_file = adjust_prefix('test/test_spark.py', prefix) + self.assertEqual( + process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), test_file_prefix=prefix), + ParsedUnitTestResults( + files=1, + errors=[], + suites=1, + suite_tests=5, + suite_skipped=1, + suite_failures=1, + suite_errors=0, + suite_time=2, + suite_details=[], + cases=[ + UnitTestCase(result_file=result_file, test_file=test_file, line=1412, class_name='test.test_spark.SparkTests', test_name='test_check_shape_compatibility', result='success', message=None, content=None, stdout=None, stderr=None, time=6.435), + UnitTestCase(result_file=result_file, test_file=test_file, line=1641, class_name='test.test_spark.SparkTests', test_name='test_get_available_devices', result='skipped', message='get_available_devices only supported in Spark 3.0 and above', content='/horovod/test/test_spark.py:1642: get_available_devices only\n supported in Spark 3.0 and above\n ', stdout=None, stderr=None, time=0.001), + UnitTestCase(result_file=result_file, test_file=test_file, line=1102, class_name='test.test_spark.SparkTests', test_name='test_get_col_info', result='success', message=None, content=None, stdout=None, stderr=None, time=6.417), + UnitTestCase(result_file=result_file, test_file=test_file, line=819, class_name='test.test_spark.SparkTests', test_name='test_rsh_events', result='failure', message='self = def test_rsh_events(self): > self.do_test_rsh_events(3) test_spark.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test_spark.py:836: in do_test_rsh_events self.do_test_rsh(command, 143, events=events) test_spark.py:852: in do_test_rsh self.assertEqual(expected_result, res) E AssertionError: 143 != 0', content='self = \n\n def test_rsh_events(self):\n > self.do_test_rsh_events(3)\n\n test_spark.py:821:\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n test_spark.py:836: in do_test_rsh_events\n self.do_test_rsh(command, 143, events=events)\n test_spark.py:852: in do_test_rsh\n self.assertEqual(expected_result, res)\n E AssertionError: 143 != 0\n ', stdout=None, stderr=None, time=7.541), + UnitTestCase(result_file=result_file, test_file=test_file, line=813, class_name='test.test_spark.SparkTests', test_name='test_rsh_with_non_zero_exit_code', result='success', message=None, content=None, stdout=None, stderr=None, time=1.514) + ] + )) + def test_get_results(self): success = TestElement('success') skipped = TestElement('skipped') diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index 711aa040..2306c398 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -120,6 +120,7 @@ def create_settings(actor='actor', xunit_files_glob=None, trx_files_glob=None, time_factor=1.0, + test_file_prefix=None, check_name='Check Name', comment_title='Comment Title', comment_mode=comment_mode, @@ -137,7 +138,7 @@ def create_settings(actor='actor', seconds_between_github_reads=1.5, seconds_between_github_writes=2.5, secondary_rate_limit_wait_seconds=6.0, - search_pull_requests=search_pull_requests + search_pull_requests=search_pull_requests, ) stats = UnitTestRunResults( From d93dbc08d265e4653da0c0af544bee2a851d3e38 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 22 Sep 2023 08:27:29 +0200 Subject: 
[PATCH 04/28] Releasing v2.10.0 --- action.yml | 2 +- python/publish/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/action.yml b/action.yml index 145c24ee..3944bdf7 100644 --- a/action.yml +++ b/action.yml @@ -144,7 +144,7 @@ outputs: runs: using: 'docker' - image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.9.0' + image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.10.0' branding: icon: 'check-circle' diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 4bec0ce9..cbe2d9ef 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -11,7 +11,7 @@ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError # keep the version in sync with action.yml -__version__ = 'v2.9.0' +__version__ = 'v2.10.0' logger = logging.getLogger('publish') digest_prefix = '[test-results]:data:' From 19b3c297d3229255f603d2e96c185648e69eb5b5 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 22 Sep 2023 20:19:20 +0200 Subject: [PATCH 05/28] Specify option in readme to fail action --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index fe161e2f..d1760e38 100644 --- a/README.md +++ b/README.md @@ -60,9 +60,9 @@ The `if: always()` clause guarantees that this action always runs, even if earli When run multiple times in one workflow, the [option](#configuration) `check_name` has to be set to a unique value for each instance. Otherwise, the multiple runs overwrite each other's results. -***Note:** This action does not fail if tests failed. The action that executed the tests should -fail on test failure. The published results however indicate failure if tests fail or errors occur. -This behaviour is configurable.* +***Note:** By default, this action does not fail if tests failed. This can be [configured](#configuration) via `action_fail`. +The action that executed the tests should fail on test failure. The published results however indicate failure if tests fail or errors occur, +which can be [configured](#configuration) via `fail_on`.* ## Permissions @@ -128,7 +128,7 @@ See workaround for `check_name`. ### Modes `create new` and `update last` removed for option `comment_mode` The action always updates an earlier pull request comment, which is the exact behaviour of mode `update last`. -The configuration options `create new` and `update last` are therefore removed. +The [configuration](#configuration) options `create new` and `update last` are therefore removed. **Impact:** An existing pull request comment is always updated. @@ -150,7 +150,7 @@ Set `comment_mode` to `always` (the default) or `off`. ## Publishing test results -Test results are published on GitHub at various (configurable) places: +Test results are published on GitHub at various ([configurable](#configuration)) places: - as [a comment](#pull-request-comment) in related pull requests - as [a check](#commit-and-pull-request-checks) in the checks section of a commit and related pull requests @@ -401,7 +401,7 @@ is not easily available, e.g. when [creating a badge from test results](#create-
Access JSON via file -The optional `json_file` allows to configure a file where extended JSON information are to be written. +The optional `json_file` allows to [configure](#configuration) a file where extended JSON information are to be written. Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` contain more information than just the number of errors and annotations, respectively. From 38e2922b263a122af1de344c17b5b2ecffbf18a0 Mon Sep 17 00:00:00 2001 From: Victor Sollerhed Date: Sun, 8 Oct 2023 20:50:19 +0200 Subject: [PATCH 06/28] Reduce output from `action_fail` (#511) --- python/publish_test_results.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 6dee1746..7934a299 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -263,8 +263,6 @@ def main(settings: Settings, gha: GithubAction) -> None: if action_fail_required(conclusion, settings.action_fail, settings.action_fail_on_inconclusive): gha.error(f'This action finished successfully, but test results have status {conclusion}.') - gha.error(f'Configuration requires this action to fail (action_fail={settings.action_fail}, ' - f'action_fail_on_inconclusive={settings.action_fail_on_inconclusive}).') sys.exit(1) From 4e4df66ce79b9d4c718b8eacce3b30dd66174250 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Sun, 8 Oct 2023 22:12:06 +0200 Subject: [PATCH 07/28] Test publish on arm64 (#513) --- .github/workflows/publish.yml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 37e9f055..530b68c3 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -44,19 +44,26 @@ jobs: json_file: 'tests.json' publish-docker-image: - name: Publish Test Results (Docker Image) + name: Publish Test Results (Docker Image ${{ matrix.arch }}) runs-on: ubuntu-latest permissions: checks: write pull-requests: write security-events: write + strategy: + fail-fast: false + matrix: + arch: [amd64, arm64] steps: - name: Checkout uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 + with: + image: tonistiigi/binfmt:latest + platforms: ${{ matrix.arch }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -67,6 +74,7 @@ jobs: with: load: true push: false + platforms: linux/${{ matrix.arch }} tags: enricomi/publish-unit-test-result-action:latest outputs: type=docker @@ -80,14 +88,14 @@ jobs: if: always() env: INPUT_GITHUB_TOKEN: ${{ github.token }} - INPUT_CHECK_NAME: Test Results (Docker Image) + INPUT_CHECK_NAME: Test Results (Docker Image ${{ matrix.arch }}) INPUT_FILES: "artifacts/**/*.xml" INPUT_JSON_FILE: "tests.json" INPUT_JSON_SUITE_DETAILS: true INPUT_JSON_TEST_CASE_RESULTS: true INPUT_REPORT_SUITE_LOGS: "any" run: | - docker run \ + docker run --platform linux/${{ matrix.arch }} \ --workdir $GITHUB_WORKSPACE \ --rm \ -e "INPUT_CHECK_NAME" \ From c0b8fea6031e5937d208464a398fd6fb52d59920 Mon Sep 17 00:00:00 2001 From: Yunchi Luo Date: Mon, 9 Oct 2023 03:42:48 -0400 Subject: [PATCH 08/28] Publish ARM images (#512) Allow the action to run on self-hosted ARM64 workers, compatible with AWS Graviton ARM instances. 
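For illustration, a minimal sketch of how the action could be consumed from such a self-hosted ARM64 worker once the ARM image is available; the job name and runner labels (`self-hosted`, `Linux`, `ARM64`) are assumptions and not part of this patch:

```yaml
publish-test-results-arm:
  name: Publish Test Results (self-hosted ARM64)
  # assumed labels of a self-hosted ARM64 runner, e.g. an AWS Graviton instance
  runs-on: [self-hosted, Linux, ARM64]
  permissions:
    checks: write
    pull-requests: write
  steps:
    - name: Publish Test Results
      uses: EnricoMi/publish-unit-test-result-action@v2
      if: always()
      with:
        files: |
          test-results/**/*.xml
```

The step itself is unchanged compared to the x86 examples elsewhere in this series; only the runner labels differ.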
--- .github/workflows/ci-cd.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 1934841e..eff49717 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -140,6 +140,7 @@ jobs: with: tags: ${{ steps.docker-meta.outputs.tags }} labels: ${{ steps.docker-meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 pull: true # deploy image actions from commits pushed to master and # deploy Dockerfile actions from pushed version tags (no major versions) From a23f8100a5af1438c30cf74656485244f4373a0c Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Mon, 9 Oct 2023 10:12:22 +0200 Subject: [PATCH 09/28] Mention ARM in README.md (#514) --- README.md | 16 +++++++--------- misc/badge-arm.svg | 20 ++++++++++++++++++++ misc/badge-dart.svg | 20 -------------------- misc/badge-js.svg | 20 ++++++++++++++++++++ misc/badge-junit-xml.svg | 20 -------------------- misc/badge-mocha.svg | 20 -------------------- misc/badge-nunit-xml.svg | 20 -------------------- misc/badge-trx.svg | 24 ++++++++++++------------ misc/badge-xml.svg | 20 ++++++++++++++++++++ misc/badge-xunit-xml.svg | 20 -------------------- 10 files changed, 79 insertions(+), 121 deletions(-) create mode 100644 misc/badge-arm.svg delete mode 100644 misc/badge-dart.svg create mode 100644 misc/badge-js.svg delete mode 100644 misc/badge-junit-xml.svg delete mode 100644 misc/badge-mocha.svg delete mode 100644 misc/badge-nunit-xml.svg create mode 100644 misc/badge-xml.svg delete mode 100644 misc/badge-xunit-xml.svg diff --git a/README.md b/README.md index d1760e38..b1f8635d 100644 --- a/README.md +++ b/README.md @@ -6,16 +6,13 @@ [![GitHub Workflows badge](https://gist.github.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/workflows.svg)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) [![Docker pulls badge](https://gist.github.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/downloads.svg)](https://github.com/users/EnricoMi/packages/container/package/publish-unit-test-result-action) +![Arm badge](misc/badge-arm.svg) ![Ubuntu badge](misc/badge-ubuntu.svg) ![macOS badge](misc/badge-macos.svg) ![Windows badge](misc/badge-windows.svg) -![JUnit badge](misc/badge-junit-xml.svg) -![NUnit badge](misc/badge-nunit-xml.svg) -![XUnit badge](misc/badge-xunit-xml.svg) +![XML badge](misc/badge-xml.svg) ![TRX badge](misc/badge-trx.svg) -![Dart badge](misc/badge-dart.svg) -![Mocha badge](misc/badge-mocha.svg) - +![JS badge](misc/badge-js.svg) [![Test Results](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/tests.svg)](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/tests.svg) @@ -23,7 +20,8 @@ This [GitHub Action](https://github.com/actions) analyses test result files and publishes the results on GitHub. It supports [JSON (Dart, Mocha), TRX (MSTest, VS) and XML (JUnit, NUnit, XUnit) file formats](#generating-test-result-files), and runs on Linux, macOS and Windows. -You can add this action to your GitHub workflow for ![Ubuntu Linux](https://badgen.net/badge/icon/Ubuntu?icon=terminal&label) (e.g. `runs-on: ubuntu-latest`) runners: +You can use this action with ![Ubuntu Linux](misc/badge-ubuntu.svg) runners (e.g. 
`runs-on: ubuntu-latest`) +or ![ARM Linux](misc/badge-arm.svg) self-hosted runners: ```yaml - name: Publish Test Results @@ -36,8 +34,8 @@ You can add this action to your GitHub workflow for ![Ubuntu Linux](https://badg test-results/**/*.json ``` -Use this for ![macOS](https://badgen.net/badge/icon/macOS?icon=apple&label) (e.g. `runs-on: macos-latest`) -and ![Windows](https://badgen.net/badge/icon/Windows?icon=windows&label) (e.g. `runs-on: windows-latest`) runners: +Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`) +and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners: ```yaml - name: Publish Test Results diff --git a/misc/badge-arm.svg b/misc/badge-arm.svg new file mode 100644 index 00000000..2fba6119 --- /dev/null +++ b/misc/badge-arm.svg @@ -0,0 +1,20 @@ + + ARM + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-dart.svg b/misc/badge-dart.svg deleted file mode 100644 index 42ee1507..00000000 --- a/misc/badge-dart.svg +++ /dev/null @@ -1,20 +0,0 @@ - - Dart: JSON - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-js.svg b/misc/badge-js.svg new file mode 100644 index 00000000..c87b6b0b --- /dev/null +++ b/misc/badge-js.svg @@ -0,0 +1,20 @@ + + JS / Dart / Mocha: JSON + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-junit-xml.svg b/misc/badge-junit-xml.svg deleted file mode 100644 index 90ac0c64..00000000 --- a/misc/badge-junit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - JUnit: XML - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-mocha.svg b/misc/badge-mocha.svg deleted file mode 100644 index 169af1a6..00000000 --- a/misc/badge-mocha.svg +++ /dev/null @@ -1,20 +0,0 @@ - - Mocha: JSON - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-nunit-xml.svg b/misc/badge-nunit-xml.svg deleted file mode 100644 index aa954cb0..00000000 --- a/misc/badge-nunit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - NUnit: XML - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-trx.svg b/misc/badge-trx.svg index 7d15af67..010c5b2d 100644 --- a/misc/badge-trx.svg +++ b/misc/badge-trx.svg @@ -1,20 +1,20 @@ - - TRX: OK - + + .Net: TRX + - - - - - + + + + + \ No newline at end of file diff --git a/misc/badge-xml.svg b/misc/badge-xml.svg new file mode 100644 index 00000000..f935934d --- /dev/null +++ b/misc/badge-xml.svg @@ -0,0 +1,20 @@ + + JUnit / NUnit / XUnit: XML + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-xunit-xml.svg b/misc/badge-xunit-xml.svg deleted file mode 100644 index 752b78cc..00000000 --- a/misc/badge-xunit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - XUnit: XML - - - - - - - - - - - - - \ No newline at end of file From 560aeb09aca3f1b0e6a9dec12299d0c4e3929a8f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:24:29 +0200 Subject: [PATCH 10/28] Update urllib3 and charset-normalizer, remove unused dependencies (#507) --- composite/action.yml | 2 +- python/requirements-direct.txt | 4 +--- python/requirements.txt | 8 +++++--- python/test/requirements.txt | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/composite/action.yml b/composite/action.yml index dfdb0df9..8c50cd75 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -196,7 +196,7 @@ runs: continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: enricomi-publish-action-${{ runner.os 
}}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-6ca4d32816ff499719c9ab474caf6c68 + key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-df386fe4e04a72c96e140f0566a5c849 - name: Install Python dependencies run: | diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt index e0dd2cd2..2b17f6ce 100644 --- a/python/requirements-direct.txt +++ b/python/requirements-direct.txt @@ -2,6 +2,4 @@ humanize==3.14.0 junitparser==3.1.0 lxml==4.9.3 psutil==5.9.5 -PyGithub==2.0.1rc0 -requests==2.31.0 -urllib3==1.26.16 +PyGithub==2.1.1 diff --git a/python/requirements.txt b/python/requirements.txt index ecb99aab..0904d348 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -3,7 +3,7 @@ junitparser==3.1.0 future==0.18.3 lxml==4.9.3 psutil==5.9.5 -PyGithub==2.0.1rc0 +PyGithub==2.1.1 Deprecated==1.2.14 wrapt==1.15.0 PyJWT==2.8.0 @@ -14,6 +14,8 @@ PyGithub==2.0.1rc0 six==1.16.0 requests==2.31.0 certifi==2023.7.22 - charset-normalizer==3.2.0 + charset-normalizer==3.3.0 idna==3.4 - urllib3==1.26.16 + urllib3==2.0.6 + typing_extensions==4.7.1 + urllib3==2.0.6 diff --git a/python/test/requirements.txt b/python/test/requirements.txt index e4db7ef0..a5090624 100644 --- a/python/test/requirements.txt +++ b/python/test/requirements.txt @@ -4,4 +4,3 @@ prettyprinter pytest pyyaml>=5.1 requests -urllib3<2.0.0 From 48fc7ada94dc8d047d37565564ceccba365d7cf2 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Mon, 9 Oct 2023 15:09:53 +0200 Subject: [PATCH 11/28] Do not publish if tests are cancelled (#515) --- .github/workflows/ci-cd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index eff49717..a24080ad 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -53,7 +53,7 @@ jobs: needs: [test-mac, test-lnx, test-win] # we run the action from this branch whenever we can (when it runs in our repo's context) if: > - always() && + ! cancelled() && github.event.sender.login != 'dependabot[bot]' && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) uses: "./.github/workflows/publish.yml" From 78b628159a70e2c81b88e2e50914f1b00fa60a42 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Tue, 10 Oct 2023 15:56:20 +0200 Subject: [PATCH 12/28] Use virtualenv in composite action (#501) --- README.md | 49 -------------------------------------------- composite/action.yml | 47 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index b1f8635d..e2050285 100644 --- a/README.md +++ b/README.md @@ -786,52 +786,3 @@ Self-hosted runners may require setting up a Python environment first: Self-hosted runners for Windows require Bash shell to be installed. Easiest way to have one is by installing Git for Windows, which comes with Git BASH. Make sure that the location of `bash.exe` is part of the `PATH` environment variable seen by the self-hosted runner. - -
-Isolating composite action from your workflow - -Note that the composite action modifies this Python environment by installing dependency packages. -If this conflicts with actions that later run Python in the same workflow (which is a rare case), -it is recommended to run this action as the last step in your workflow, or to run it in an isolated workflow. -Running it in an isolated workflow is similar to the workflows shown in [Use with matrix strategy](#use-with-matrix-strategy). - -To run the composite action in an isolated workflow, your CI workflow should upload all test result files: - -```yaml -build-and-test: - name: "Build and Test" - runs-on: macos-latest - - steps: - - … - - name: Upload Test Results - if: always() - uses: actions/upload-artifact@v3 - with: - name: Test Results - path: "test-results/**/*.xml" -``` - -Your dedicated publish-test-results workflow then downloads these files and runs the action there: - -```yaml -publish-test-results: - name: "Publish Tests Results" - needs: build-and-test - runs-on: windows-latest - # the build-and-test job might be skipped, we don't need to run this job then - if: success() || failure() - - steps: - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action/composite@v2 - with: - files: "artifacts/**/*.xml" -``` -
- diff --git a/composite/action.yml b/composite/action.yml index 8c50cd75..c33b276f 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -186,6 +186,7 @@ runs: ;; Windows*) echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT + echo "pip-options=--user" >> $GITHUB_OUTPUT ;; esac shell: bash @@ -198,12 +199,47 @@ runs: path: ${{ steps.os.outputs.pip-cache }} key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-df386fe4e04a72c96e140f0566a5c849 + - name: Create virtualenv + id: venv + continue-on-error: true + env: + PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} + run: | + echo '##[group]Create virtualenv' + # install virtualenv, if it is not yet installed + python3 -m pip install $PIP_OPTIONS virtualenv + python3 -m virtualenv enricomi-publish-action-venv + # test activating virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac + which python3 + echo '##[endgroup]' + shell: bash + - name: Install Python dependencies + env: + PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} run: | echo '##[group]Install Python dependencies' + if [ "${{ steps.venv.outcome }}" == "success" ] + then + # activate virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac + fi + which python3 + # make sure wheel is installed, which improves installing our dependencies - python3 -m pip install wheel - python3 -m pip install -r $GITHUB_ACTION_PATH/../python/requirements.txt + python3 -m pip install $PIP_OPTIONS wheel + python3 -m pip install $PIP_OPTIONS -r $GITHUB_ACTION_PATH/../python/requirements.txt echo '##[endgroup]' shell: bash @@ -211,6 +247,13 @@ runs: id: test-results run: | echo '##[group]Publish Test Results' + # activate virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac python3 $GITHUB_ACTION_PATH/../python/publish_test_results.py echo '##[endgroup]' env: From ca89ad036b5fcd524c1017287fb01b5139908408 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Tue, 10 Oct 2023 16:26:45 +0200 Subject: [PATCH 13/28] Releasing v2.11.0 --- action.yml | 2 +- python/publish/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/action.yml b/action.yml index 3944bdf7..7a040468 100644 --- a/action.yml +++ b/action.yml @@ -144,7 +144,7 @@ outputs: runs: using: 'docker' - image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.10.0' + image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.11.0' branding: icon: 'check-circle' diff --git a/python/publish/__init__.py b/python/publish/__init__.py index cbe2d9ef..8b1d15e4 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -11,7 +11,7 @@ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError # keep the version in sync with action.yml -__version__ = 'v2.10.0' +__version__ = 'v2.11.0' logger = logging.getLogger('publish') digest_prefix = '[test-results]:data:' From 7aeefc7868616f59c74fc5eefc6ee14acf44b224 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Thu, 12 Oct 2023 10:20:06 +0200 Subject: [PATCH 14/28] Document using relative paths (#519) --- README.md | 32 +++++++++++++++++++++++++- action.yml | 2 +- composite/action.yml | 2 +- 
python/publish_test_results.py | 13 ++++++++--- python/test/test_action_script.py | 38 +++++++++++++++++++------------ 5 files changed, 67 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index e2050285..8ca62ff1 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,8 @@ or ![ARM Linux](misc/badge-arm.svg) self-hosted runners: test-results/**/*.json ``` +See the [notes on running this action with absolute paths](#running-with-absolute-paths) if you cannot use relative test result file paths. + Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`) and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners: @@ -261,7 +263,7 @@ The list of most notable options: |Option|Default Value|Description| |:-----|:-----:|:----------| -|`files`|_no default_|File patterns of test result files. Supports `*`, `**`, `?`, and `[]` character ranges. Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| +|`files`|_no default_|File patterns of test result files. Relative paths are known to work best, while the composite action [also works with absolute paths](#running-with-absolute-paths). Supports `*`, `**`, `?`, and `[]` character ranges. Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| |`check_name`|`"Test Results"`|An alternative name for the check result. Required to be unique for each instance in one workflow.| |`comment_title`|same as `check_name`|An alternative name for the pull request comment.| |`comment_mode`|`always`|The action posts comments to pull requests that are associated with the commit. Set to:

`always` - always comment
`changes` - comment when changes w.r.t. the target branch exist
`changes in failures` - when changes in the number of failures and errors exist
`changes in errors` - when changes in the number of (only) errors exist
`failures` - when failures or errors exist
`errors` - when (only) errors exist
`off` - to not create pull request comments.| @@ -767,6 +769,34 @@ Set the `gistURL` to the Gist that you want to write the badge file to, in the f You can then use the badge via this URL: https://gist.githubusercontent.com/{user}/{id}/raw/badge.svg
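As a usage illustration of the `comment_mode` values listed further above, a minimal, hypothetical step sketch; the chosen value and the file pattern are examples only:

```yaml
- name: Publish Test Results
  uses: EnricoMi/publish-unit-test-result-action@v2
  if: always()
  with:
    # any documented value works here: always, changes, changes in failures,
    # changes in errors, failures, errors, off
    comment_mode: failures
    files: |
      test-results/**/*.xml
```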
+## Running with absolute paths + +It is known that this action works best with relative paths (e.g. `test-results/**/*.xml`), +but most absolute paths (e.g. `/tmp/test-results/**/*.xml`) require to use the composite variant +of this action (`uses: EnricoMi/publish-unit-test-result-action/composite@v2`). + +If you have to use absolute paths with the non-composite variant of this action (`uses: EnricoMi/publish-unit-test-result-action@v2`), +you have to copy files to a relative path first, and then use the relative path: + +```yaml +- name: Copy Test Results + if: always() + run: | + cp -Lpr /tmp/test-results test-results + shell: bash + +- name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: always() + with: + files: | + test-results/**/*.xml + test-results/**/*.trx + test-results/**/*.json +``` + +Using the non-composite variant of this action is recommended as it starts up much quicker. + ## Running as a composite action Running this action as a composite action allows to run it on various operating systems as it diff --git a/action.yml b/action.yml index 7a040468..135a555c 100644 --- a/action.yml +++ b/action.yml @@ -42,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Supports *, **, ?, and []. Use multiline string for multiple patterns. Patterns starting with ! exclude the matching files. There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' diff --git a/composite/action.yml b/composite/action.yml index c33b276f..0db7e50c 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -42,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Supports *, **, ?, and []. Use multiline string for multiple patterns. Patterns starting with ! exclude the matching files. There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' 
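To illustrate the `files` pattern rules described above (one pattern per line, `*`/`**`/`?`/`[]` globs, `!` exclusions), a minimal sketch; the concrete paths and the excluded pattern are assumptions:

```yaml
- name: Publish Test Results
  uses: EnricoMi/publish-unit-test-result-action@v2
  if: always()
  with:
    # one pattern per line; patterns starting with "!" exclude matching files,
    # and at least one pattern must not start with "!"
    files: |
      test-results/**/*.xml
      test-results/**/*.trx
      !test-results/**/ignored-*.xml
```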
diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 7934a299..e1ab1c5b 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -4,6 +4,7 @@ import re import sys from glob import glob +from pathlib import Path from typing import List, Optional, Union, Mapping, Tuple, Any, Iterable, Callable import github @@ -55,7 +56,7 @@ def get_github(auth: github.Auth, seconds_between_writes=seconds_between_writes) -def get_files(multiline_files_globs: str) -> List[str]: +def get_files(multiline_files_globs: str) -> Tuple[List[str], bool]: multiline_files_globs = re.split('\r?\n\r?', multiline_files_globs) included = {str(file) for files_glob in multiline_files_globs @@ -65,7 +66,10 @@ def get_files(multiline_files_globs: str) -> List[str]: for files_glob in multiline_files_globs if files_glob.startswith('!') for file in glob(files_glob[1:], recursive=True)} - return list(included - excluded) + has_absolute = any({Path(pattern).is_absolute() + for files_glob in multiline_files_globs + for pattern in [files_glob[1:] if files_glob.startswith('!') else files_glob]}) + return list(included - excluded), has_absolute def prettify_glob_pattern(pattern: Optional[str]) -> Optional[str]: @@ -77,12 +81,15 @@ def expand_glob(pattern: Optional[str], file_format: Optional[str], gha: GithubA if not pattern: return [] - files = get_files(pattern) + files, has_absolute_patterns = get_files(pattern) file_format = f' {file_format}' if file_format else '' prettyfied_pattern = prettify_glob_pattern(pattern) if len(files) == 0: gha.warning(f'Could not find any{file_format} files for {prettyfied_pattern}') + if has_absolute_patterns: + gha.warning(f'Your file pattern contains absolute paths, please read the notes on absolute paths:') + gha.warning(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths') else: logger.info(f'Reading{file_format} files {prettyfied_pattern} ({get_number_of_files(files)}, {get_files_size(files)})') logger.debug(f'reading{file_format} files {list(files)}') diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index d0929f8c..dae244d9 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -795,7 +795,7 @@ def test_get_files_single(self): with open(filename, mode='w'): pass - files = get_files('file1.txt') + files, _ = get_files('file1.txt') self.assertEqual(['file1.txt'], sorted(files)) def test_get_files_multi(self): @@ -808,7 +808,7 @@ def test_get_files_multi(self): with open(filename, mode='w'): pass - files = get_files(f'file1.txt{sep}file2.txt') + files, _ = get_files(f'file1.txt{sep}file2.txt') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_single_wildcard(self): @@ -821,7 +821,7 @@ def test_get_files_single_wildcard(self): with open(filename, mode='w'): pass - files = get_files(wildcard) + files, _ = get_files(wildcard) self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_multi_wildcard(self): @@ -834,8 +834,9 @@ def test_get_files_multi_wildcard(self): with open(filename, mode='w'): pass - files = get_files(f'*1.txt{sep}*3.bin') + files, absolute = get_files(f'*1.txt{sep}*3.bin') self.assertEqual(['file1.txt', 'file3.bin'], sorted(files)) + self.assertFalse(absolute) def test_get_files_subdir_and_wildcard(self): filenames = [os.path.join('sub', 'file1.txt'), @@ -851,7 +852,7 @@ def test_get_files_subdir_and_wildcard(self): with open(filename, 
mode='w'): pass - files = get_files('sub/*.txt') + files, _ = get_files('sub/*.txt') self.assertEqual([os.path.join('sub', 'file1.txt'), os.path.join('sub', 'file2.txt')], sorted(files)) @@ -876,7 +877,7 @@ def test_get_files_recursive_wildcard(self): with open(filename, mode='w'): pass - files = get_files(pattern) + files, _ = get_files(pattern) self.assertEqual(sorted(expected), sorted(files)) def test_get_files_symlinks(self): @@ -895,7 +896,7 @@ def test_get_files_symlinks(self): pass os.symlink(os.path.join(path, 'sub2'), os.path.join(path, 'sub1', 'sub2'), target_is_directory=True) - files = get_files(pattern) + files, _ = get_files(pattern) self.assertEqual(sorted(expected), sorted(files)) def test_get_files_character_range(self): @@ -906,7 +907,7 @@ def test_get_files_character_range(self): with open(filename, mode='w'): pass - files = get_files('file[0-2].*') + files, _ = get_files('file[0-2].*') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_multi_match(self): @@ -917,7 +918,7 @@ def test_get_files_multi_match(self): with open(filename, mode='w'): pass - files = get_files('*.txt\nfile*.txt\nfile2.*') + files, _ = get_files('*.txt\nfile*.txt\nfile2.*') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_absolute_path_and_wildcard(self): @@ -928,8 +929,9 @@ def test_get_files_absolute_path_and_wildcard(self): with open(filename, mode='w'): pass - files = get_files(os.path.join(path, '*')) + files, absolute = get_files(os.path.join(path, '*')) self.assertEqual([os.path.join(path, file) for file in filenames], sorted(files)) + self.assertTrue(absolute) def test_get_files_exclude_only(self): filenames = ['file1.txt', 'file2.txt', 'file3.bin'] @@ -939,7 +941,7 @@ def test_get_files_exclude_only(self): with open(filename, mode='w'): pass - files = get_files('!file*.txt') + files, _ = get_files('!file*.txt') self.assertEqual([], sorted(files)) def test_get_files_include_and_exclude(self): @@ -950,12 +952,12 @@ def test_get_files_include_and_exclude(self): with open(filename, mode='w'): pass - files = get_files('*.txt\n!file1.txt') + files, _ = get_files('*.txt\n!file1.txt') self.assertEqual(['file2.txt'], sorted(files)) def test_get_files_with_mock(self): with mock.patch('publish_test_results.glob') as m: - files = get_files('*.txt\n!file1.txt') + files, _ = get_files('*.txt\n!file1.txt') self.assertEqual([], files) self.assertEqual([mock.call('*.txt', recursive=True), mock.call('file1.txt', recursive=True)], m.call_args_list) @@ -1124,9 +1126,17 @@ def test_parse_files_no_matches(self): gha.warning.assert_has_calls([ mock.call(f'Could not find any JUnit XML files for {missing_junit}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any NUnit XML files for {missing_nunit}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any XUnit XML files for {missing_xunit}'), - mock.call(f'Could not find any TRX files for {missing_trx}') + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + 
mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'Could not find any TRX files for {missing_trx}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), ]) gha.error.assert_not_called() From d826f852b6b65be05c9bcc086cfc7e181255d464 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Thu, 12 Oct 2023 11:44:09 +0200 Subject: [PATCH 15/28] CI: Fix issues with uploaded changed expectation files (#520) --- .github/actions/test/action.yml | 9 +++- .../junit-xml/jest/jest-junit.annotations | 4 +- .../junit-xml/junit.multiresult.annotations | 6 ++- .../mocha/latex-utensils.annotations | 14 ++++--- .../junit-xml/no-cases-but-tests.annotations | 4 +- .../test/files/junit-xml/no-cases.annotations | 4 +- .../junit-xml/pytest/junit.fail.annotations | 4 +- .../pytest/junit.gloo.elastic.annotations | 4 +- .../junit.gloo.elastic.spark.tf.annotations | 4 +- ...junit.gloo.elastic.spark.torch.annotations | 4 +- .../pytest/junit.gloo.standalone.annotations | 4 +- .../pytest/junit.gloo.static.annotations | 4 +- .../pytest/junit.mpi.integration.annotations | 4 +- .../pytest/junit.mpi.standalone.annotations | 4 +- .../pytest/junit.mpi.static.annotations | 4 +- .../junit.spark.integration.1.annotations | 4 +- .../junit.spark.integration.2.annotations | 4 +- ...ch.spark.diff.DiffOptionsSuite.annotations | 4 +- .../testsuite-in-testsuite.annotations | 4 +- .../junit-xml/testsuite-root.annotations | 4 +- .../files/junit-xml/tst/disabled.annotations | 12 ++++-- .../files/junit-xml/xunit/xunit.annotations | 4 +- .../nunit/mstest/clicketyclackety.annotations | 16 ++++--- .../files/nunit/mstest/pickles.annotations | 4 +- .../nunit/mstest/timewarpinc.annotations | 4 +- .../nunit3/jenkins/NUnit-correct2.annotations | 14 ++++--- .../nunit3/jenkins/NUnit-correct3.annotations | 4 +- .../nunit3/jenkins/NUnit-failure.annotations | 4 +- .../jenkins/NUnit-healthReport.annotations | 4 +- .../nunit3/jenkins/NUnit-ignored.annotations | 4 +- .../jenkins/NUnit-issue1077.annotations | 4 +- .../jenkins/NUnit-issue33493.annotations | 10 +++-- .../jenkins/NUnit-issue44527.annotations | 42 +++++++++++-------- .../jenkins/NUnit-issue48478.annotations | 4 +- .../jenkins/NUnit-issue50162.annotations | 4 +- .../jenkins/NUnit-issue5674.annotations | 4 +- .../jenkins/NUnit-issue6353.annotations | 4 +- .../jenkins/NUnit-multinamespace.annotations | 4 +- .../jenkins/NUnit-sec1752-file.annotations | 4 +- .../jenkins/NUnit-sec1752-https.annotations | 4 +- .../nunit3/jenkins/NUnit-simple.annotations | 4 +- .../nunit/nunit3/jenkins/NUnit.annotations | 4 +- .../nunit3/jenkins/NUnitUnicode.annotations | 4 +- .../test/files/trx/mstest/pickles.annotations | 4 +- .../nunit/FluentValidation.Tests.annotations | 4 +- .../trx/nunit/NUnit-net461-sample.annotations | 4 +- .../NUnit-netcoreapp3.1-sample.annotations | 4 +- .../files/trx/nunit/SilentNotes.annotations | 4 +- .../files/trx/xunit/dotnet-trx.annotations | 4 +- .../trx/xunit/xUnit-net461-sample.annotations | 4 +- .../xUnit-netcoreapp3.1-sample.annotations | 4 +- ...mi_YAMILEX 2015-10-24 04_18_59.annotations | 4 +- .../test/files/xunit/mstest/fixie.annotations | 10 +++-- .../mstest/jenkinsci/testcase1.annotations | 4 +- .../mstest/jenkinsci/testcase2.annotations | 10 +++-- .../mstest/jenkinsci/testcase3.annotations | 4 +- 
.../mstest/jenkinsci/testcase4.annotations | 4 +- .../mstest/jenkinsci/testcase5.annotations | 4 +- .../mstest/jenkinsci/testcase6.annotations | 4 +- .../files/xunit/mstest/pickles.annotations | 10 +++-- 60 files changed, 191 insertions(+), 158 deletions(-) diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml index 9bf1001c..a07fbff1 100644 --- a/.github/actions/test/action.yml +++ b/.github/actions/test/action.yml @@ -82,8 +82,12 @@ runs: if ! git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]] then - zip changes.zip $(git diff --name-only) $(git ls-files -o --exclude-standard) - exit 1 + # we only upload the changed files if we can find zip + if which zip + then + (git diff --name-only && git ls-files -o --exclude-standard) | xargs -d "\n" zip changed-expectations.zip + exit 1 + fi fi shell: bash - name: Upload changed expectation files @@ -92,6 +96,7 @@ runs: with: name: Changed expectations path: changed-expectations.zip + if-no-files-found: error - name: PyTest env: diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations index d045baf9..a249cf66 100644 --- a/python/test/files/junit-xml/jest/jest-junit.annotations +++ b/python/test/files/junit-xml/jest/jest-junit.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations index 8107e6c7..843e9d7d 100644 --- a/python/test/files/junit-xml/junit.multiresult.annotations +++ b/python/test/files/junit-xml/junit.multiresult.annotations @@ -12,7 +12,8 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' 'tests\u20031 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '1 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' @@ -21,7 +22,8 @@ 'blob/VERSION/README.md#the-symbols "test errors")\n4 runs\u2006\u2003' '-2 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20033 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '3 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20032 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations b/python/test/files/junit-xml/mocha/latex-utensils.annotations index aa6fca0f..9a0e673a 100644 --- a/python/test/files/junit-xml/mocha/latex-utensils.annotations +++ 
b/python/test/files/junit-xml/mocha/latex-utensils.annotations @@ -10,22 +10,24 @@ '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 ' 'suites\u2004\u2003\u20020s ' '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n101 ' - 'tests\u2003101 ' + 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' + '101 tests\u2003101 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n109 runs\u2006\u2003' '109 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv' '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK' 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n', diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations index c11559f9..f00f7925 100644 --- a/python/test/files/junit-xml/no-cases-but-tests.annotations +++ b/python/test/files/junit-xml/no-cases-but-tests.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MS+bmCo7V8ol1DePWg/th8SgcGE3YEtLhvmvMe7' 'ZeShJDETtcJPpfKAFHqkWxIhpMQfQ6975Z5yKXWuAqFrhuSXOe4AjSYnYT/HkBNCXSZd0' diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations index 95c7030e..2e91b4cb 100644 --- a/python/test/files/junit-xml/no-cases.annotations +++ b/python/test/files/junit-xml/no-cases.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff 
--git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations index dcda56f5..d8e1b75d 100644 --- a/python/test/files/junit-xml/pytest/junit.fail.annotations +++ b/python/test/files/junit-xml/pytest/junit.fail.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfzExssYghA3IpgFKuPdXVEUuzez2dm5BqM8H1hTMe4jhBemiCKAs4QtIR3CderzHn' '2UkkT3iQW25/kWWoD5CYXokExNBqPNvWuWuZu/WuIilrhsSbeuEAiexfws+HECiWEEJ90' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations index efe1f950..15ef5279 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd' 'EFKaOpi1lhJ1NeZgGaRPlQiBazwbC9xXj/grcovcSfXOJvTVpjwBPki7lF8PMCyjZFT+I' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations index 85307df2..4845e007 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC' '8MLigVTV3MCns0WcwSNhLlY0K0+BgMJhfj/QveovQSf3KJvzVltQZP8FzMLZKfF82Ojyn' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations index 31c9b6f5..8c469e9e 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 
'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN' 'cP9MZJ+TMz7GTSf68ELJkYETVGg25LRX9nwVu8vcCfXOBvTep1BUsQL2Ymwc8LUe9HxOM' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations index 263648dc..e440d68c 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv' 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations index e0fd1fc4..160c5610 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o' 'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations index 7d09ff5a..f84b911f 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttAYGy9jCGLciGAWqIx3d/EL3Zt5yewwodEeetFUAnzE8MEYSQZ0NmHHzE9IX/vuwU' 'elSrHgxqL+xCTRFEITOXoMRfv20sxzN/+1i7PYxXlLuXXFwPAs4WcJxwk6KM9l3gAAAA=' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations index 51dd8a65..695f8c8d 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' 
'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf' 'vCYL2UwTSfmWGPgUdoAQuJMgmFaDAa9Fsqhv0LPuLr3Zzlbs5r0qwrOIK4mJ0EPy/3HdY' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations index dfefec24..1b881f0f 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr' 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK' 'xX6mcWPNjUUUwS10JoIkOfIb/HYthF8BWp93CWezivKbNt6Bi+Jews4boBWo1x8eMAAAA' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations index 70a4071b..b6ad16ba 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR' 'hC0jqb5jULbGRqFkbBSqJkMSF6fAwmx8W8f8FbvL2LP7mLvzXtrYVI8CwRZiWPEwEjqVj' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations index cf3cc69e..77d9cda4 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr' '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf' '2CcsGYaKpkJtjI8L8aNMwkchY9osXHYFi5GO9f8Bapd/End/G3ZuyygCd4LuFGLY8TfGY' diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations 
b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations index 6c5b09ba..0a96b20c 100644 --- a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igSz8TYkZLlrHqKfr3VlOz25t5MBdfQCvLR9Y1jFsPrsDsUTgwB2FPSIcL15D3ZL' '2Uf7HBSaItYhGgf0IhGkwG/ZF7Yda5l79a5CoWuW5Js+/gCNJidhX8fgDdy7133QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations index 94a8b872..6ada1666 100644 --- a/python/test/files/junit-xml/testsuite-in-testsuite.annotations +++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations index 94a8b872..6ada1666 100644 --- a/python/test/files/junit-xml/testsuite-root.annotations +++ b/python/test/files/junit-xml/testsuite-root.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations index 981189ca..db795d3e 100644 --- a/python/test/files/junit-xml/tst/disabled.annotations +++ b/python/test/files/junit-xml/tst/disabled.annotations @@ -13,18 +13,22 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n31 ' 'tests\u2003\u205f\u20046 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20035 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '5 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200319 ' + 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' + 
'19 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' 'blob/VERSION/README.md#the-symbols "test errors")\n31 runs\u2006\u2003' '11 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200319 ' + 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' + '19 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations index 4fa7b5d3..702ffbd0 100644 --- a/python/test/files/junit-xml/xunit/xunit.annotations +++ b/python/test/files/junit-xml/xunit/xunit.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations index 20bedd9c..663345cb 100644 --- a/python/test/files/nunit/mstest/clicketyclackety.annotations +++ b/python/test/files/nunit/mstest/clicketyclackety.annotations @@ -13,19 +13,23 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n22 ' 'tests\u200312 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200310 ' + 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' + '10 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n23 runs\u2006\u2003' '13 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200310 ' + 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' + '10 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' 
'[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv' '9g9kHr+FklB1z1ft4UmDgpyYroMG8wnEk55Ps3lqAIE9e+FNQ67awFYsiX8LuSzwvzas/' diff --git a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations index 5080096e..ddf7aa8d 100644 --- a/python/test/files/nunit/mstest/pickles.annotations +++ b/python/test/files/nunit/mstest/pickles.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations index 717924b9..7174fd3c 100644 --- a/python/test/files/nunit/mstest/timewarpinc.annotations +++ b/python/test/files/nunit/mstest/timewarpinc.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv' '0I4e1CPfsYQhNiIYAqcjH+3IgjednbbObkGozyf2NAx7iOED5aIIoCzhCMhDaFMKc8+Sk' 'lFX4sNjl+hBZjfi0J0mE8w2uJ7Yqt7udoSN7LErUu6fYdAkBPzq+DXDXGDl7HdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations index 2d1f8db5..16eaf98a 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations @@ -10,22 +10,24 @@ '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002' '0s ' '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n183 ' - 'tests\u2003183 ' + 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' + '183 tests\u2003183 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n218 runs\u2006\u2003' '218 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' 
'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr' '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN' 'vmod4Hrf9qgi3/6K2C+SMMosNkMKxXs67aBE8yN28xchaMnPe0WxYghnQJPyp5nNtosNP' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations index 33356b44..566ebf06 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV' 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations index aaacf0d5..18f21f4a 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfx0XoYQ1LhRwCxQGe/uykfo5s1M3s03OFfHZzZ0jLsA/ocloPRgDWFPSIP/pqlk4Y' 'JSVIy1OOBq32KTcGZbKlZEi/mCwRTfF1td4mqL3Mgity5ltQZPkBNzu+TPC/n9SCLdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations index 0bfffef3..e869a0e0 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv' '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys' 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations index 45560b17..c57eb736 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations +++ 
b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/Cz8zKGIMRGBdPCynh3KyKymzfTvlNa2AzJUXSNkBQhFJgjqgDeMbaMPIRnGr48Ud' 'Q63+ZihYOLvhRWwVa/TwbRY24wus/3xFr38m9LXMkS1y7t9x0CQ06CFiWvGx5uWF7dAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations index 0c8261f0..900ef576 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dyiiDv1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawjbjpgef3992q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMC7eTeEN4AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations index fd92a858..79afa56f 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations @@ -12,18 +12,20 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' 'tests\u20031 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '1 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n2 runs\u2006\u20032 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/' 'JRyvwxixX2n9ACDIn2FQrRYTYYbends+Q+fmrlaR1LXLek2zYIBHkxvwh+XlEX1VPdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations index 
0bbe872e..cfbe8ada 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations @@ -10,10 +10,11 @@ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' '14m 11s ' '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' + 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' + '146 tests\u2003\u205f\u2004\u205f\u20046 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '140 ' @@ -21,13 +22,14 @@ 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' '6 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '144 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -1935,10 +1937,11 @@ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' '14m 11s ' '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' + 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' + '146 tests\u2003\u205f\u2004\u205f\u20046 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '140 ' @@ -1946,13 +1949,14 @@ 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' '6 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '144 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' 
'0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -3674,10 +3678,11 @@ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' '14m 11s ' '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' + 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' + '146 tests\u2003\u205f\u2004\u205f\u20046 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '140 ' @@ -3685,13 +3690,14 @@ 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' '6 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' '144 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations index 98d07c08..64a49691 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfwUJl7GENS4EcEsUBnv7oKi0L2Zl8zJF1Cz5QNrKsatBxehJZg8CgdGh68npseFr0' 't7tF7KUmxwkKg/sQhQhZgRDb4GvU69MPPcw38tchaLnLek2XdwBO9idhX8ugG5zrfD3gA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations index b767ee76..d5dc87ef 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' 
'[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLfwkFl7GEIS4kY9ZoDLe3RU1YDdvZvIOrsGowCfWNYyHBDFDT7AkFBG8I2wJaYj3NH' '55DklKKoZSbLDX71kLML+HQvT4XjC5z3fHWvdwsWWuZJlrl/TWQiR4Ewur4OcFmZnWM90' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations index d3aaa892..a8d7eebf 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MywqAIBREf' '0VctyiCoH5GxIwu+YirrqJ/72ZK7ubMDOfiGxgd+MKGjvGQIGYYCdaEMoJ3hD0hDfGd5p' 'pFSEpRMf3FAWf7FpsEU2xfoRE9lgsmV31vbHUf/7bMjSxz61LeWogEJbGwS34/WLAikt0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations index b4594abc..0b7da2a7 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHTRuXsYQlNgoYlqYjHe3KmrZ/ntt3q4dLCPpTjWV0pQgfjAkNBHCytgy8iGS3D0la/' 'NvFjNshXAGlkKMiAHZ1GwwrW/vmjL38F+7WcRuli0bvIfIkJeiyejjBNBleN/dAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations index a770afa3..591f3874 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdRYeRlCUOJGPmaBynh3F8SI3byZyTu5BrMGPrOhYzwkiAVGgiWhjOAdYU9IQ8zT9G' 'YRklL/YoejfQstwfyKFdFjbTC515djq3v4sxVuZIVbl/LWQiSoiYVN8usGDjGDkd0AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations index 10603db6..2e3d6903 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 
'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations index a5323ee4..6bb39b9e 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations index 3bc0ccac..8436a283 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations index 783dc9ea..4976bb2b 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations index 678c8cad..0872da7c 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 
'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/trx/mstest/pickles.annotations b/python/test/files/trx/mstest/pickles.annotations index c2c19739..b8073193 100644 --- a/python/test/files/trx/mstest/pickles.annotations +++ b/python/test/files/trx/mstest/pickles.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLTRaeRlCUONGAbNAZby7Kx+hmzczeTff4Fwdn9nQMe4C+B+WgNKDNYQ9IQ3+m6aShQ' 'tKUTHW4oCrfYtNwpltqVgRLeYLBlN8X2x1iastciOL3LqU1Ro8QU7M7ZI/L5ec2abdAAA' diff --git a/python/test/files/trx/nunit/FluentValidation.Tests.annotations b/python/test/files/trx/nunit/FluentValidation.Tests.annotations index 5afe0ced..53ae1dc3 100644 --- a/python/test/files/trx/nunit/FluentValidation.Tests.annotations +++ b/python/test/files/trx/nunit/FluentValidation.Tests.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u2004\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dzDsEP1MiBUtacaqp+jf28yybm/mwex8AjN63rG6YtxHCC8MEVUAtxJKQjrCdbWiea' 'j3UeukZFELbDlwi0mBISFeMSI6zAbjWpoX/JO3KcXEn2Dib087ayEQ5MX8rPhxArdpBif' diff --git a/python/test/files/trx/nunit/NUnit-net461-sample.annotations b/python/test/files/trx/nunit/NUnit-net461-sample.annotations index 54e3926a..adba4f2a 100644 --- a/python/test/files/trx/nunit/NUnit-net461-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-net461-sample.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations index ec15c572..03764a41 100644 --- a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations @@ -18,8 +18,8 @@ 
'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/SilentNotes.annotations b/python/test/files/trx/nunit/SilentNotes.annotations index b4b7860d..fe9ec8ce 100644 --- a/python/test/files/trx/nunit/SilentNotes.annotations +++ b/python/test/files/trx/nunit/SilentNotes.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCopcxBCVuRDELVMa7u/hB7HbmZWfnGszoeMeqgnEXwCcYAkoPdiUsCWnwcRLtC7' '0LSpFpxGdm2OJ7nYyWYPJCPyJafAyGNSXj/SveIgteIutdnOeUXRbwBM/F3CT5cQKN/0L' diff --git a/python/test/files/trx/xunit/dotnet-trx.annotations b/python/test/files/trx/xunit/dotnet-trx.annotations index 32397e73..8d72da4d 100644 --- a/python/test/files/trx/xunit/dotnet-trx.annotations +++ b/python/test/files/trx/xunit/dotnet-trx.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u20035 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLaSw8TKGIMaNfMwClfHuroCK3byZ3XfwBYwOfGSiYzwkiC/MCWUE7wh7QhpinsQDU0' 'hKUTN8xQZ7/S7FIsH8LjSix2rE5F7hnVtf4U+XubFlbmXKWwuRoCYWVsnPC2b3Tg/fAAA' diff --git a/python/test/files/trx/xunit/xUnit-net461-sample.annotations b/python/test/files/trx/xunit/xUnit-net461-sample.annotations index 3a4d596b..8c80f059 100644 --- a/python/test/files/trx/xunit/xUnit-net461-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-net461-sample.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations index f200c1a7..cf741d59 100644 --- a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations @@ 
-18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations index 8982261b..e1825e61 100644 --- a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations +++ b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' '1 files\u2004\u2002\u2003\u2003\u205f\u20042 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLcRECy9jCErciGAWqIx3d/GD2O3M28zONZjJ856JinEfIWQYI8oAzhI2HTEtIW1N+8' 'Lgo1LJfGKBLQmRjZZgfi8TokMyNRmMNhfTXQZvLnqXKHIXlzXl1hUCwXMxP0t+nB5bCu/' diff --git a/python/test/files/xunit/mstest/fixie.annotations b/python/test/files/xunit/mstest/fixie.annotations index b7bf755f..99c5fb52 100644 --- a/python/test/files/xunit/mstest/fixie.annotations +++ b/python/test/files/xunit/mstest/fixie.annotations @@ -12,18 +12,20 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n5 ' 'tests\u20031 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '1 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n7 runs\u2006\u20033 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '1 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QSRZcJsaKh0hh1Fd29Scxs998b5p18hm2yvGdVwbj14ALUBKNH6cBowo6QDu45Ne' '8erFcqPkaxwvETs4SNhEhiQjRIpiSDXj+9Ns43JxJ/tcBZLHDeUmbfwRHExewi+XUDvcl' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations index b5213289..fba3c114 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations +++ 
b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XsYQhLiRj1mgMt5dQFDo5s1M3kUlKOHoQqaBUBfAf7AFZB6siThGjINP01zz6g' 'Ln5VuKA86ukAxUVwhEi0WIwVRfiq3u5d+WuZFlbl3cag0+QknE7YzeD2gV0DndAAAA\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations index 45804600..856c56a7 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations @@ -12,18 +12,20 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' 'tests\u20031 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n3 runs\u2006\u20031 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '1 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XoYQhLiRj1mWynh3UZBgN29m8k5uwOrIFzYNjMcE1GBNKAmCzzhmzAN905tFTE' 'r9ix2O/i2MBPsrNGLA2mDyj2+usdcVLrbGVda4d6ngHFCGmljcJL9uwvXP6N0AAAA=\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations index 30727b7f..e8e665fa 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfw0xMsYgho3opgFKuPdXRAUuzezmzn5DHqyvGdNxbj14F4YPUoHZidshSBBJxeOXd' '6D9Uql7yRWOH5ilqB/YkI0SKYmg37PvTDL3MNfLXIRi1y2lNk2cARpMbtIft14m53n3wA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations index 95c7030e..2e91b4cb 100644 --- 
a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations index 25934f35..7d4e1989 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20030 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igQ/1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawj7lpgef39d2q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMCmAJ3I94AAAA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations index febae910..a6308b2a 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations @@ -18,8 +18,8 @@ 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' 'files\u2004\u2002\u2003\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/xunit/mstest/pickles.annotations b/python/test/files/xunit/mstest/pickles.annotations index 1a9136aa..fa6b4065 100644 --- a/python/test/files/xunit/mstest/pickles.annotations +++ b/python/test/files/xunit/mstest/pickles.annotations @@ -12,18 +12,20 @@ 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n3 ' 'tests\u20032 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' 'b/VERSION/README.md#the-symbols "failed tests")\n4 runs\u2006\u20033 ' '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed 
tests")\u20030 ' + 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' + '0 ' '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLVSsvIwhqHEjH7NAZby7qwLSzZuZvJOvoBfPR9Y1jPsIocAcUQZwlrAlpCE8k8h58l' 'EpKvq/2OGo39MqQSfbVyyIDtMFo318Q4pZJwr/tpcr2cu1SzljIBCkxPwm+XUDYSIL8t0' From 8cdbc41a5bcec7bbe3ffbcfdc8bf72945f2dd3f2 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Mon, 13 Nov 2023 15:31:08 +0100 Subject: [PATCH 16/28] Handle incomplete / none JSON elements (#530) --- python/publish/__init__.py | 20 +++++++++++++++++++- python/publish/publisher.py | 21 ++++++++------------- python/test/test_publish.py | 18 +++++++++++++++++- python/test/test_publisher.py | 29 +++++++++++++++++++++++++++-- 4 files changed, 71 insertions(+), 17 deletions(-) diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 8b1d15e4..7bb04106 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -5,7 +5,7 @@ import re from collections import defaultdict from dataclasses import dataclass -from typing import List, Any, Union, Optional, Tuple, Mapping, Iterator, Set, Iterable +from typing import List, Any, Union, Optional, Tuple, Mapping, Iterator, Set, Iterable, Dict from publish.unittestresults import Numeric, UnitTestSuite, UnitTestCaseResults, UnitTestRunResults, \ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError @@ -151,6 +151,24 @@ def removed_skips(self) -> Optional[Set[str]]: return skipped_before.intersection(removed) +def get_json_path(json: Dict[str, Any], path: Union[str, List[str]]) -> Any: + if isinstance(path, str): + path = path.split('.') + + if path[0] not in json: + return None + + elem = json[path[0]] + + if len(path) > 1: + if isinstance(elem, dict): + return get_json_path(elem, path[1:]) + else: + return None + else: + return elem + + def utf8_character_length(c: int) -> int: if c >= 0x00010000: return 4 diff --git a/python/publish/publisher.py b/python/publish/publisher.py index 5fdfb4e8..16869d79 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -13,7 +13,7 @@ from github.PullRequest import PullRequest from github.IssueComment import IssueComment -from publish import __version__, comment_mode_off, digest_prefix, restrict_unicode_list, \ +from publish import __version__, get_json_path, comment_mode_off, digest_prefix, restrict_unicode_list, \ comment_mode_always, comment_mode_changes, comment_mode_changes_failures, comment_mode_changes_errors, \ comment_mode_failures, comment_mode_errors, \ get_stats_from_digest, digest_header, get_short_summary, get_long_summary_md, \ @@ -206,7 +206,7 @@ def publish(self, check_run = None before_check_run = None if self._settings.compare_earlier: - before_commit_sha = self._settings.event.get('before') + before_commit_sha = get_json_path(self._settings.event, 'before') logger.debug(f'comparing against before={before_commit_sha}') before_check_run = self.get_check_run(before_commit_sha) else: @@ -227,8 +227,8 @@ def publish(self, logger.info('Commenting on pull requests disabled') def get_pull_from_event(self) -> Optional[PullRequest]: - 
number = self._settings.event.get('pull_request', {}).get('number') - repo = self._settings.event.get('pull_request', {}).get('base', {}).get('repo', {}).get('full_name') + number = get_json_path(self._settings.event, 'pull_request.number') + repo = get_json_path(self._settings.event, 'pull_request.base.repo.full_name') if number is None or repo is None or repo != self._settings.repo: return None @@ -390,7 +390,7 @@ def publish_check(self, before_stats = None before_check_run = None if self._settings.compare_earlier: - before_commit_sha = self._settings.event.get('before') + before_commit_sha = get_json_path(self._settings.event, 'before') logger.debug(f'comparing against before={before_commit_sha}') before_check_run = self.get_check_run(before_commit_sha) before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None @@ -686,7 +686,7 @@ def get_base_commit_sha(self, pull_request: PullRequest) -> Optional[str]: if self._settings.event: # for pull request events we take the other parent of the merge commit (base) if self._settings.event_name == 'pull_request': - return self._settings.event.get('pull_request', {}).get('base', {}).get('sha') + return get_json_path(self._settings.event, 'pull_request.base.sha') # for workflow run events we should take the same as for pull request events, # but we have no way to figure out the actual merge commit and its parents # we do not take the base sha from pull_request as it is not immutable @@ -728,18 +728,13 @@ def get_pull_request_comments(self, pull: PullRequest, order_by_updated: bool) - "POST", self._settings.graphql_url, input=query ) - return data \ - .get('data', {}) \ - .get('repository', {}) \ - .get('pullRequest', {}) \ - .get('comments', {}) \ - .get('nodes') + return get_json_path(data, 'data.repository.pullRequest.comments.nodes') def get_action_comments(self, comments: List[Mapping[str, Any]], is_minimized: Optional[bool] = False): comment_body_start = f'## {self._settings.comment_title}\n' comment_body_indicators = ['\nresults for commit ', '\nResults for commit '] return list([comment for comment in comments - if comment.get('author', {}).get('login') == self._settings.actor + if get_json_path(comment, 'author.login') == self._settings.actor and (is_minimized is None or comment.get('isMinimized') == is_minimized) and comment.get('body', '').startswith(comment_body_start) and any(indicator in comment.get('body', '') for indicator in comment_body_indicators)]) diff --git a/python/test/test_publish.py b/python/test/test_publish.py index f49f0b2a..9001f3c1 100644 --- a/python/test/test_publish.py +++ b/python/test/test_publish.py @@ -5,7 +5,7 @@ import mock from publish import __version__, Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ - get_error_annotation, get_digest_from_stats, \ + get_json_path, get_error_annotation, get_digest_from_stats, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, \ get_magnitude, get_delta, as_short_commit, as_delta, as_stat_number, as_stat_duration, get_stats_from_digest, \ @@ -29,6 +29,22 @@ class PublishTest(unittest.TestCase): old_locale = None details = [UnitTestSuite('suite', 7, 3, 2, 1, 'std-out', 'std-err')] + def test_get_json_path(self): + detail = {'a': 'A', 'b': 'B', 'c': ['d'], 'e': {}, 'f': None} + json = {'id': 1, 'name': 'Name', 'detail': detail} 
+ + self.assertEqual(None, get_json_path(json, 'not there')) + self.assertEqual(1, get_json_path(json, 'id')) + self.assertEqual('Name', get_json_path(json, 'name')) + self.assertEqual(detail, get_json_path(json, 'detail')) + self.assertEqual('A', get_json_path(json, 'detail.a')) + self.assertEqual(None, get_json_path(json, 'detail.a.g')) + self.assertEqual(['d'], get_json_path(json, 'detail.c')) + self.assertEqual({}, get_json_path(json, 'detail.e')) + self.assertEqual(None, get_json_path(json, 'detail.e.g')) + self.assertEqual(None, get_json_path(json, 'detail.f')) + self.assertEqual(None, get_json_path(json, 'detail.f.g')) + def test_test_changes(self): changes = SomeTestChanges(['removed-test', 'removed-skip', 'remain-test', 'remain-skip', 'skip', 'unskip'], ['remain-test', 'remain-skip', 'skip', 'unskip', 'add-test', 'add-skip'], diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index 2306c398..fba4f0fc 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -13,7 +13,7 @@ import mock from github import Github, GithubException -from publish import __version__, comment_mode_off, comment_mode_always, \ +from publish import __version__, get_json_path, comment_mode_off, comment_mode_always, \ comment_mode_changes, comment_mode_changes_failures, comment_mode_changes_errors, \ comment_mode_failures, comment_mode_errors, Annotation, default_annotations, \ get_error_annotation, digest_header, get_digest_from_stats, \ @@ -888,6 +888,22 @@ def test_get_pull_from_event(self): actual = publisher.get_pull_from_event() self.assertIs(actual, pr) repo.get_pull.assert_called_once_with(1234) + repo.get_pull.reset_mock() + + # test with none in pull request + for event in [ + {}, + {'pull_request': None}, + {'pull_request': {'number': 1234, 'base': None}}, + {'pull_request': {'number': 1234, 'base': {'repo': None}}}, + {'pull_request': {'number': 1234, 'base': {'repo': {}}}}, + ]: + settings = self.create_settings(event=event) + publisher = Publisher(settings, gh, gha) + + actual = publisher.get_pull_from_event() + self.assertIsNone(actual) + repo.get_pull.assert_not_called() def do_test_get_pulls(self, settings: Settings, @@ -911,7 +927,7 @@ def do_test_get_pulls(self, else: gh.search_issues.assert_not_called() if event_pull_request is not None and \ - settings.repo == settings.event.get('pull_request', {}).get('base', {}).get('repo', {}).get('full_name'): + settings.repo == get_json_path(settings.event, 'pull_request.base.repo.full_name'): repo.get_pull.assert_called_once_with(event_pull_request.number) commit.get_pulls.assert_not_called() else: @@ -2621,6 +2637,15 @@ def test_get_pull_request_comments_order_updated(self): 'Results for commit dee59820.\u2003± Comparison against base commit 70b5dd18.\n', 'isMinimized': False }, + # malformed comments + { + 'id': 'comment nine', + 'author': None, + }, + { + 'id': 'comment ten', + 'author': {}, + }, ] def test_get_action_comments(self): From 1b521c1c1af087692eae017cabd8792964fca4bb Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Tue, 28 Nov 2023 08:50:42 +0100 Subject: [PATCH 17/28] Add badge JSON to GIST (#536) --- .github/workflows/badges.yml | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/.github/workflows/badges.yml b/.github/workflows/badges.yml index af1a1334..61b39ecc 100644 --- a/.github/workflows/badges.yml +++ b/.github/workflows/badges.yml @@ -2,7 +2,7 @@ name: Badges on: schedule: - - cron: '24 03 * * 3' + - cron: '52 07 * * *' workflow_dispatch: 
permissions: {} @@ -32,6 +32,10 @@ jobs: color: blue path: downloads.svg + - name: Create JSON + run: + echo '{"subject": "Docker pulls", "status": "${{ steps.downloads.outputs.total_downloads }} (${{ steps.downloads.outputs.recent_downloads_per_day }}/day)", "color": "blue"}' > downloads.json + - name: Upload badge to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: @@ -39,6 +43,13 @@ jobs: gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 file: downloads.svg + - name: Upload JSON to Gist + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + file: downloads.json + workflows: name: Dependent workflows runs-on: ubuntu-latest @@ -61,9 +72,20 @@ jobs: color: blue path: workflows.svg + - name: Create JSON + run: + echo '{"subject": "GitHub Workflows", "status": "${{ steps.workflows.outputs.total_workflows }}", "color": "blue"}' > workflows.json + - name: Upload badge to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 file: workflows.svg + + - name: Upload JSON to Gist + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + file: workflows.json From d47d57bddde919c2b644b54e0999b2fafca0ac6f Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Tue, 28 Nov 2023 08:53:06 +0100 Subject: [PATCH 18/28] Revert cron time, fix yaml syntax --- .github/workflows/badges.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/badges.yml b/.github/workflows/badges.yml index 61b39ecc..542f2e2d 100644 --- a/.github/workflows/badges.yml +++ b/.github/workflows/badges.yml @@ -2,7 +2,7 @@ name: Badges on: schedule: - - cron: '52 07 * * *' + - cron: '24 03 * * 3' workflow_dispatch: permissions: {} @@ -33,7 +33,7 @@ jobs: path: downloads.svg - name: Create JSON - run: + run: | echo '{"subject": "Docker pulls", "status": "${{ steps.downloads.outputs.total_downloads }} (${{ steps.downloads.outputs.recent_downloads_per_day }}/day)", "color": "blue"}' > downloads.json - name: Upload badge to Gist @@ -73,7 +73,7 @@ jobs: path: workflows.svg - name: Create JSON - run: + run: | echo '{"subject": "GitHub Workflows", "status": "${{ steps.workflows.outputs.total_workflows }}", "color": "blue"}' > workflows.json - name: Upload badge to Gist From 65976d503cd9784d29751d1c9206c7bafe24c0bd Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Tue, 28 Nov 2023 08:59:32 +0100 Subject: [PATCH 19/28] Have badgen generate workflows and download badges from json --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8ca62ff1..03e6eb41 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@ [![CI/CD](https://github.com/EnricoMi/publish-unit-test-result-action/actions/workflows/ci-cd.yml/badge.svg)](https://github.com/EnricoMi/publish-unit-test-result-action/actions/workflows/ci-cd.yml) [![GitHub release badge](https://badgen.net/github/release/EnricoMi/publish-unit-test-result-action/stable)](https://github.com/EnricoMi/publish-unit-test-result-action/releases/latest) [![GitHub license 
badge](misc/badge-license.svg)](http://www.apache.org/licenses/LICENSE-2.0) -[![GitHub Workflows badge](https://gist.github.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/workflows.svg)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) -[![Docker pulls badge](https://gist.github.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/downloads.svg)](https://github.com/users/EnricoMi/packages/container/package/publish-unit-test-result-action) +[![GitHub Workflows badge](https://badgen.net/https/gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/0838ccc17c78fe920b83e6f096847e23b85ff9c3/workflows.json)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) +[![Docker pulls badge](https://badgen.net/https/gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/0838ccc17c78fe920b83e6f096847e23b85ff9c3/downloads.json)](https://github.com/users/EnricoMi/packages/container/package/publish-unit-test-result-action) ![Arm badge](misc/badge-arm.svg) ![Ubuntu badge](misc/badge-ubuntu.svg) From f0b959beb675130d17f0bfaf944f0d0fe00d2cf3 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 1 Dec 2023 16:41:19 +0100 Subject: [PATCH 20/28] Use forwarded Gist URL (#537) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 03e6eb41..ac5942b4 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@ [![CI/CD](https://github.com/EnricoMi/publish-unit-test-result-action/actions/workflows/ci-cd.yml/badge.svg)](https://github.com/EnricoMi/publish-unit-test-result-action/actions/workflows/ci-cd.yml) [![GitHub release badge](https://badgen.net/github/release/EnricoMi/publish-unit-test-result-action/stable)](https://github.com/EnricoMi/publish-unit-test-result-action/releases/latest) [![GitHub license badge](misc/badge-license.svg)](http://www.apache.org/licenses/LICENSE-2.0) -[![GitHub Workflows badge](https://badgen.net/https/gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/0838ccc17c78fe920b83e6f096847e23b85ff9c3/workflows.json)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) -[![Docker pulls badge](https://badgen.net/https/gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/0838ccc17c78fe920b83e6f096847e23b85ff9c3/downloads.json)](https://github.com/users/EnricoMi/packages/container/package/publish-unit-test-result-action) +[![GitHub Workflows badge](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/workflows.svg)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) +[![Docker pulls badge](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/downloads.svg)](https://github.com/users/EnricoMi/packages/container/package/publish-unit-test-result-action) ![Arm badge](misc/badge-arm.svg) ![Ubuntu badge](misc/badge-ubuntu.svg) From b9929bc08c31f9a5218ac5523d00af6b0f4f04f4 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 15 Dec 2023 20:24:41 +0100 Subject: [PATCH 21/28] Remove link from emojis in summary MD (#540) --- python/publish/__init__.py | 10 +-- python/test/files/dart/json/tests.annotations | 19 +---- .../junit-xml/bazel/suite-logs.annotations | 18 +---- 
.../junit-xml/jest/jest-junit.annotations | 16 +--- .../junit-xml/junit.multiresult.annotations | 28 +------ .../junit-xml/minimal-attributes.annotations | 18 +---- .../mocha/latex-utensils.annotations | 24 +----- .../files/junit-xml/no-attributes.annotations | 18 +---- .../junit-xml/no-cases-but-tests.annotations | 16 +--- .../test/files/junit-xml/no-cases.annotations | 16 +--- .../files/junit-xml/non-junit.annotations | 16 +--- .../junit-xml/pytest/junit.fail.annotations | 16 +--- .../pytest/junit.gloo.elastic.annotations | 15 +--- .../junit.gloo.elastic.spark.tf.annotations | 15 +--- ...junit.gloo.elastic.spark.torch.annotations | 15 +--- .../pytest/junit.gloo.standalone.annotations | 17 +--- .../pytest/junit.gloo.static.annotations | 17 +--- .../pytest/junit.mpi.integration.annotations | 16 +--- .../pytest/junit.mpi.standalone.annotations | 15 +--- .../pytest/junit.mpi.static.annotations | 15 +--- .../junit.spark.integration.1.annotations | 15 +--- .../junit.spark.integration.2.annotations | 15 +--- ...ch.spark.diff.DiffOptionsSuite.annotations | 16 +--- .../testsuite-in-testsuite.annotations | 16 +--- .../junit-xml/testsuite-root.annotations | 16 +--- .../files/junit-xml/tst/disabled.annotations | 30 +------ .../junit-xml/unsupported-unicode.annotations | 18 +---- .../junit-xml/with-xml-entities.annotations | 18 +---- .../files/junit-xml/xunit/xunit.annotations | 16 +--- python/test/files/mocha/tests.annotations | 18 +---- .../nunit/mstest/clicketyclackety.annotations | 25 +----- .../files/nunit/mstest/pickles.annotations | 16 +--- .../nunit/mstest/timewarpinc.annotations | 16 +--- .../nunit3/jenkins/NUnit-correct.annotations | 19 +---- .../nunit3/jenkins/NUnit-correct2.annotations | 23 +----- .../nunit3/jenkins/NUnit-correct3.annotations | 17 +--- .../nunit3/jenkins/NUnit-failure.annotations | 16 +--- .../jenkins/NUnit-healthReport.annotations | 16 +--- .../nunit3/jenkins/NUnit-ignored.annotations | 16 +--- .../jenkins/NUnit-issue1077.annotations | 16 +--- .../jenkins/NUnit-issue33493.annotations | 23 +----- .../jenkins/NUnit-issue44527.annotations | 78 +++---------------- .../jenkins/NUnit-issue48478.annotations | 16 +--- .../jenkins/NUnit-issue50162.annotations | 16 +--- .../jenkins/NUnit-issue5674.annotations | 16 +--- .../jenkins/NUnit-issue6353.annotations | 16 +--- .../jenkins/NUnit-multinamespace.annotations | 16 +--- .../jenkins/NUnit-sec1752-file.annotations | 16 +--- .../jenkins/NUnit-sec1752-https.annotations | 16 +--- .../nunit3/jenkins/NUnit-simple.annotations | 16 +--- .../nunit/nunit3/jenkins/NUnit.annotations | 16 +--- .../nunit3/jenkins/NUnitUnicode.annotations | 16 +--- .../test/files/trx/mstest/pickles.annotations | 16 +--- .../nunit/FluentValidation.Tests.annotations | 17 +--- .../trx/nunit/NUnit-net461-sample.annotations | 16 +--- .../NUnit-netcoreapp3.1-sample.annotations | 16 +--- .../files/trx/nunit/SilentNotes.annotations | 17 +--- .../files/trx/xunit/dotnet-trx.annotations | 16 +--- .../trx/xunit/xUnit-net461-sample.annotations | 16 +--- .../xUnit-netcoreapp3.1-sample.annotations | 16 +--- ...mi_YAMILEX 2015-10-24 04_18_59.annotations | 15 +--- .../test/files/xunit/mstest/fixie.annotations | 23 +----- .../mstest/jenkinsci/testcase1.annotations | 16 +--- .../mstest/jenkinsci/testcase2.annotations | 23 +----- .../mstest/jenkinsci/testcase3.annotations | 16 +--- .../mstest/jenkinsci/testcase4.annotations | 16 +--- .../mstest/jenkinsci/testcase5.annotations | 16 +--- .../mstest/jenkinsci/testcase6.annotations | 16 +--- .../files/xunit/mstest/pickles.annotations 
| 23 +----- python/test/test_publish.py | 22 +++--- python/test/test_publisher.py | 70 ++++++++--------- 71 files changed, 278 insertions(+), 1067 deletions(-) diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 7bb04106..e1ff4564 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -446,11 +446,11 @@ def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str: all_tests_label_md = 'tests' -passed_tests_label_md = get_link_and_tooltip_label_md(':heavy_check_mark:', 'passed tests') -skipped_tests_label_md = get_link_and_tooltip_label_md(':zzz:', 'skipped / disabled tests') -failed_tests_label_md = get_link_and_tooltip_label_md(':x:', 'failed tests') -test_errors_label_md = get_link_and_tooltip_label_md(':fire:', 'test errors') -duration_label_md = get_link_and_tooltip_label_md(':stopwatch:', 'duration of all tests') +passed_tests_label_md = ':heavy_check_mark:' +skipped_tests_label_md = ':zzz:' +failed_tests_label_md = ':x:' +test_errors_label_md = ':fire:' +duration_label_md = ':stopwatch:' def get_short_summary_md(stats: UnitTestRunResultsOrDeltaResults) -> str: diff --git a/python/test/files/dart/json/tests.annotations b/python/test/files/dart/json/tests.annotations index dc5fdfa6..63bc9f36 100644 --- a/python/test/files/dart/json/tests.annotations +++ b/python/test/files/dart/json/tests.annotations @@ -7,21 +7,10 @@ 'output': { 'title': '2 errors, 1 fail, 1 skipped, 16 pass in 0s', 'summary': - '20 tests\u2002\u2003\u200316 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '4 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '20 tests\u2002\u2003\u200316 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u205f\u20044 suites\u2003\u2003\u205f\u20041 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20041 :x:\u2003\u20032 :fire:\n' + '\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REiy4TYkVDpTHqKrp74w+au3nvY97DNzhXwyfWNYwbBzbAQLA4FBa0ImwJabB+6j' 'PMxknpP8diDrhTK4pNwFmJFVGjz5BBp3LR31UwitIL/MsF/tekvi6wBOliZhf8/QAMgVR' diff --git a/python/test/files/junit-xml/bazel/suite-logs.annotations b/python/test/files/junit-xml/bazel/suite-logs.annotations index ced06018..c4d7686e 100644 --- a/python/test/files/junit-xml/bazel/suite-logs.annotations +++ b/python/test/files/junit-xml/bazel/suite-logs.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 
'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLbT1M4QgxIsC5g4q498liHp0OzvJnNLBbknOYhqEpAzpgyWjThBDwbFgEelVdSvKxn' 'CpaIOjO5yGvTssYsQWwRyITZ57+K9VZrHKvGWi95AKtCVo1fK6AX55nzvdAAAA\n', diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations index a249cf66..e6410a28 100644 --- a/python/test/files/junit-xml/jest/jest-junit.annotations +++ b/python/test/files/junit-xml/jest/jest-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations index 843e9d7d..2eac018c 100644 --- a/python/test/files/junit-xml/junit.multiresult.annotations +++ b/python/test/files/junit-xml/junit.multiresult.annotations @@ -7,30 +7,10 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 1s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20021s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 
'blob/VERSION/README.md#the-symbols "test errors")\n4 runs\u2006\u2003' - '-2 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '3 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20032 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20031 suites\u2004\u2003\u20021s :stopwatch:\n4 tests\u2003' + '1 :heavy_check_mark:\u20031 :zzz:\u20031 :x:\u20031 :fire:\n4 runs\u2006\u2003' + '-2 :heavy_check_mark:\u20033 :zzz:\u20032 :x:\u20031 :fire:\n\n' + 'Results for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KotfBTeRlCEONGPmYXKuPdlQhEujdvkrn4BkYTX9jQMU4RQoU1ogzgXcZXhKTmsgVFpf' '5S0AFnc2wSTHNoRI/5wehKL82S68d6fLmpcK5V/48pby2EF/JitEt+P6y+BE/eAAAA\n', diff --git a/python/test/files/junit-xml/minimal-attributes.annotations b/python/test/files/junit-xml/minimal-attributes.annotations index d61a33f6..73faacfa 100644 --- a/python/test/files/junit-xml/minimal-attributes.annotations +++ b/python/test/files/junit-xml/minimal-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations b/python/test/files/junit-xml/mocha/latex-utensils.annotations index 9a0e673a..d698a0db 100644 --- a/python/test/files/junit-xml/mocha/latex-utensils.annotations +++ b/python/test/files/junit-xml/mocha/latex-utensils.annotations @@ -8,26 +8,10 @@ 'title': 'All 101 tests pass in 0s', 'summary': '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 ' - 'suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '101 tests\u2003101 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 
'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n109 runs\u2006\u2003' - '109 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + 'suites\u2004\u2003\u20020s :stopwatch:\n101 tests\u2003101 ' + ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n109 runs\u2006\u2003109 ' + ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv' '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK' 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n', diff --git a/python/test/files/junit-xml/no-attributes.annotations b/python/test/files/junit-xml/no-attributes.annotations index b263b6d0..29db427c 100644 --- a/python/test/files/junit-xml/no-attributes.annotations +++ b/python/test/files/junit-xml/no-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations index f00f7925..d2f513be 100644 --- a/python/test/files/junit-xml/no-cases-but-tests.annotations +++ b/python/test/files/junit-xml/no-cases-but-tests.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 skipped, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - 
'[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MS+bmCo7V8ol1DePWg/th8SgcGE3YEtLhvmvMe7' 'ZeShJDETtcJPpfKAFHqkWxIhpMQfQ6975Z5yKXWuAqFrhuSXOe4AjSYnYT/HkBNCXSZd0' diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations index 2e91b4cb..c580ccc2 100644 --- a/python/test/files/junit-xml/no-cases.annotations +++ b/python/test/files/junit-xml/no-cases.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '0 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/junit-xml/non-junit.annotations b/python/test/files/junit-xml/non-junit.annotations index c3c2e082..617eadba 100644 --- a/python/test/files/junit-xml/non-junit.annotations +++ b/python/test/files/junit-xml/non-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 parse errors', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n0 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n1 errors\n\nResults ' - 'for commit commit s.\n\n' + '0 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n0 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 
:x:\n1 errors\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBBFf' '0Vm3aK2/UyIKQ35iBldRf+emNC4u+dcODc49JZhVcukgAvmBnOFvZDOmGLHemSWe+NizC' 'hOvAbhNPpBWKJE3VCJLKbMffzXGotYY9kyKQTMFfpSfGh4XnRU87HdAAAA\n', diff --git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations index d8e1b75d..103fcee0 100644 --- a/python/test/files/junit-xml/pytest/junit.fail.annotations +++ b/python/test/files/junit-xml/pytest/junit.fail.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 3 pass in 2s', 'summary': - '5 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfzExssYghA3IpgFKuPdXVEUuzez2dm5BqM8H1hTMe4jhBemiCKAs4QtIR3CderzHn' '2UkkT3iQW25/kWWoD5CYXokExNBqPNvWuWuZu/WuIilrhsSbeuEAiexfws+HECiWEEJ90' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations index 15ef5279..b0f0e449 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 10 tests pass, 4 skipped in 1m 12s', 'summary': - '14 tests\u2002\u2003\u200310 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 12s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20044 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '14 tests\u2002\u2003\u200310 :heavy_check_mark:\u2003\u20031m 12s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20044 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations index 4845e007..ee663b01 100644 --- 
a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 20 tests pass, 2 skipped in 10m 27s', 'summary': - '22 tests\u2002\u2003\u200320 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '10m 27s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '22 tests\u2002\u2003\u200320 :heavy_check_mark:\u2003\u200310m 27s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations index 8c469e9e..ebbc888c 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 11m 10s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '11m 10s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '22 tests\u2002\u2003\u200322 :heavy_check_mark:\u2003\u200311m 10s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations index e440d68c..571c1237 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 80 tests pass, 17 skipped in 3m 25s', 'summary': - '97 tests\u2002\u2003\u200380 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols 
"passed tests")\u2003\u2003' - '3m 25s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200317 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '97 tests\u2002\u2003\u200380 :heavy_check_mark:\u2003\u20033m 25s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u200317 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv' 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations index 160c5610..6d50a75f 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 12 tests pass, 12 skipped in 1m 9s', 'summary': - '24 tests\u2002\u2003\u200312 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 9s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200312 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '24 tests\u2002\u2003\u200312 :heavy_check_mark:\u2003\u20031m 9s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u200312 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o' 'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations index f84b911f..6a310f1a 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 3 tests pass in 15s', 'summary': - '3 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '15s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols 
"skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u200315s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttAYGy9jCGLciGAWqIx3d/EL3Zt5yewwodEeetFUAnzE8MEYSQZ0NmHHzE9IX/vuwU' 'elSrHgxqL+xCTRFEITOXoMRfv20sxzN/+1i7PYxXlLuXXFwPAs4WcJxwk6KM9l3gAAAA=' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations index 695f8c8d..6130ffd4 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 96 tests pass, 1 skipped in 3m 39s', 'summary': - '97 tests\u2002\u2003\u200396 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3m 39s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '97 tests\u2002\u2003\u200396 :heavy_check_mark:\u2003\u20033m 39s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20041 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations index 1b881f0f..97fca517 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 24 tests pass in 2m 4s', 'summary': - '24 tests\u2002\u2003\u200324 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '24 tests\u2002\u2003\u200324 :heavy_check_mark:\u2003\u20032m 4s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 
'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr' 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations index b6ad16ba..2194bc52 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 45s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 45s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '35 tests\u2002\u2003\u200333 :heavy_check_mark:\u2003\u20032m 45s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations index 77d9cda4..3f4e6053 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations @@ -7,18 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 52s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 52s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '35 tests\u2002\u2003\u200333 :heavy_check_mark:\u2003\u20032m 52s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr' '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf' diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations index 0a96b20c..a875e388 
100644 --- a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 2s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igSz8TYkZLlrHqKfr3VlOz25t5MBdfQCvLR9Y1jFsPrsDsUTgwB2FPSIcL15D3ZL' '2Uf7HBSaItYhGgf0IhGkwG/ZF7Yda5l79a5CoWuW5Js+/gCNJidhX8fgDdy7133QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations index 6ada1666..f14f9aff 100644 --- a/python/test/files/junit-xml/testsuite-in-testsuite.annotations +++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations index 6ada1666..f14f9aff 100644 --- a/python/test/files/junit-xml/testsuite-root.annotations +++ b/python/test/files/junit-xml/testsuite-root.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 
'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations index db795d3e..1812a95a 100644 --- a/python/test/files/junit-xml/tst/disabled.annotations +++ b/python/test/files/junit-xml/tst/disabled.annotations @@ -8,32 +8,10 @@ 'title': '1 errors, 19 fail, 5 skipped, 6 pass in 0s', 'summary': '\u205f\u20041 files\u2004\u2003\u205f\u20042 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n31 ' - 'tests\u2003\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '5 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '19 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n31 runs\u2006\u2003' - '11 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '19 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '0s :stopwatch:\n31 tests\u2003\u205f\u20046 :heavy_check_mark:\u2003' + '5 :zzz:\u200319 :x:\u20031 :fire:\n31 runs\u2006\u200311 ' + ':heavy_check_mark:\u20030 :zzz:\u200319 :x:\u20031 :fire:\n\nResults ' + 'for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NQQqAIBBFr' 'yKuW2RRUJcJsaIhyxh1Fd29sdLczXsf3px8Bj1Z3jNRMG49uAcqgtGjdGB2wpKQBhemWk' 'QYrFeKTPuLFQ4STRKzBB3aXTITosHvHfo9FcMdg+IXb7CMnPcekeeU2TZwBN/F7CL5dQP' diff --git a/python/test/files/junit-xml/unsupported-unicode.annotations b/python/test/files/junit-xml/unsupported-unicode.annotations index 562a3976..2fe151ac 100644 --- a/python/test/files/junit-xml/unsupported-unicode.annotations +++ b/python/test/files/junit-xml/unsupported-unicode.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '2 errors, 2 fail, 2 skipped, 1 pass in 8s', 'summary': - '7 tests\u2002\u2003\u20031 ' - 
'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '8s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20032 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '7 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20038s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '2 :x:\u2003\u20032 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYXXTReBlDUGIjgikwGe9uQVDc/ntt3skV6MXxkbUN4y6Af2EOKDxYQzgQ0sHHU1/25I' 'KU+TeLDQ4S3SuUAP0TC6LFbDCY0ouzzj381RJXscR1S9p9B0+QF3Or4NcNSlhwMN0AAAA' diff --git a/python/test/files/junit-xml/with-xml-entities.annotations b/python/test/files/junit-xml/with-xml-entities.annotations index a887a097..1bddc789 100644 --- a/python/test/files/junit-xml/with-xml-entities.annotations +++ b/python/test/files/junit-xml/with-xml-entities.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 2 skipped in 0s', 'summary': - '4 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ5AMBBFr' '9J0bYFYuYw0RUyUyky7Enc3qmV2/72fvFPP4CbSvWoqpSlC+GCMaAL4nbFm5CM8V1f2QN' 'FaeQ60wsGi/cRswOXaKyZEj9lg3EvvmTL38l9LLGKJZcv6bYPAkJeixejrBpBXIV3dAAA' diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations index 702ffbd0..1d6aa0d5 100644 --- a/python/test/files/junit-xml/xunit/xunit.annotations +++ b/python/test/files/junit-xml/xunit/xunit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols 
"duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/mocha/tests.annotations b/python/test/files/mocha/tests.annotations index 607a0c8e..120e7589 100644 --- a/python/test/files/mocha/tests.annotations +++ b/python/test/files/mocha/tests.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 2 pass in 12s', 'summary': - '5 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '12s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u200312s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KobSSx8TKGoMaNfMwClfHu8hXs3sxm56Y7yM3SmYwDodaD+2D1yB0YHZEFDhcXb1Pdi/' 'VCBMGaOOEq31nsHORPbIgGi0Gvay/OPpe51RJ3scR9SxilwAUoi9iD0+cFI3viF94AAAA' diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations index 663345cb..4c6a0c96 100644 --- a/python/test/files/nunit/mstest/clicketyclackety.annotations +++ b/python/test/files/nunit/mstest/clicketyclackety.annotations @@ -8,28 +8,9 @@ 'title': '10 fail, 12 pass in 0s', 'summary': '\u205f\u20041 files\u2004\u2003\u205f\u20048 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n22 ' - 'tests\u200312 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '10 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n23 runs\u2006\u2003' - '13 ' - 
'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '10 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '0s :stopwatch:\n22 tests\u200312 :heavy_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n23 runs\u2006\u200313 :heavy_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv' '9g9kHr+FklB1z1ft4UmDgpyYroMG8wnEk55Ps3lqAIE9e+FNQ67awFYsiX8LuSzwvzas/' diff --git a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations index ddf7aa8d..c958f5a0 100644 --- a/python/test/files/nunit/mstest/pickles.annotations +++ b/python/test/files/nunit/mstest/pickles.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations index 7174fd3c..8f3fb865 100644 --- a/python/test/files/nunit/mstest/timewarpinc.annotations +++ b/python/test/files/nunit/mstest/timewarpinc.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 2s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults 
for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv' '0I4e1CPfsYQhNiIYAqcjH+3IgjednbbObkGozyf2NAx7iOED5aIIoCzhCMhDaFMKc8+Sk' 'lFX4sNjl+hBZjfi0J0mE8w2uJ7Yqt7udoSN7LErUu6fYdAkBPzq+DXDXGDl7HdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations index de1d72e3..b627f2ec 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations @@ -7,21 +7,10 @@ 'output': { 'title': '1 errors, 1 fail, 8 skipped, 18 pass in 0s', 'summary': - '28 tests\u2002\u2003\u200318 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n11 ' - 'suites\u2003\u2003\u205f\u20048 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '28 tests\u2002\u2003\u200318 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n11 suites\u2003\u2003\u205f\u20048 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u2003\u205f\u20041 :x:\u2003\u20031 :fire:\n\n' + 'Results for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHdTJeBlDUGKjiGlhMt7dIqi49b3fvEMaWCeSvWgqISmATxBpDKg8uI25ZuTFx63tHh' 'goaB2/C7PAzuYTRsGa60lMiA6zwbC9xXj/gkl8vZuL3M1lTTtrwTPkS9Cs5HkBSPFg+uI' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations index 16eaf98a..f7e67371 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations @@ -8,26 +8,9 @@ 'title': 'All 183 tests pass in 0s', 'summary': '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '183 tests\u2003183 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n218 runs\u2006\u2003' - '218 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults 
for ' - 'commit commit s.\n\n' + '0s :stopwatch:\n183 tests\u2003183 :heavy_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n218 runs\u2006\u2003218 :heavy_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr' '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN' 'vmod4Hrf9qgi3/6K2C+SMMosNkMKxXs67aBE8yN28xchaMnPe0WxYghnQJPyp5nNtosNP' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations index 566ebf06..6493e048 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 22 tests pass in 4m 24s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4m 24s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n13 ' - 'suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200322 :heavy_check_mark:\u2003\u20034m 24s ' + ':stopwatch:\n13 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV' 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations index 18f21f4a..66dc0373 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfx0XoYQ1LhRwCxQGe/uykfo5s1M3s03OFfHZzZ0jLsA/ocloPRgDWFPSIP/pqlk4Y' 
'JSVIy1OOBq32KTcGZbKlZEi/mCwRTfF1td4mqL3Mgity5ltQZPkBNzu+TPC/n9SCLdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations index e869a0e0..9368cf8e 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 9 pass in 1s', 'summary': - '10 tests\u2002\u2003\u20039 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '10 tests\u2002\u2003\u20039 :heavy_check_mark:\u2003\u20031s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u20030 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u20031 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv' '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys' 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations index c57eb736..f42a6d45 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 1 tests pass, 2 skipped in 0s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/Cz8zKGIMRGBdPCynh3KyKymzfTvlNa2AzJUXSNkBQhFJgjqgDeMbaMPIRnGr48Ud' 'Q63+ZihYOLvhRWwVa/TwbRY24wus/3xFr38m9LXMkS1y7t9x0CQ06CFiWvGx5uWF7dAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations index 900ef576..c83c9ccb 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 6 tests 
pass in 35s', 'summary': - '6 tests\u2002\u2003\u20036 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '35s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20036 :heavy_check_mark:\u2003\u200335s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dyiiDv1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawjbjpgef3992q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMC7eTeEN4AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations index 79afa56f..5eb519c2 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations @@ -7,25 +7,10 @@ 'output': { 'title': 'All 1 tests pass, 1 skipped in 6s', 'summary': - '1 files\u2004\u20032 suites\u2004\u2003\u20026s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n2 runs\u2006\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20032 suites\u2004\u2003\u20026s :stopwatch:\n2 tests\u2003' + '1 :heavy_check_mark:\u20031 :zzz:\u20030 :x:\n2 runs\u2006\u20032 ' + ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/' 'JRyvwxixX2n9ACDIn2FQrRYTYYbends+Q+fmrlaR1LXLek2zYIBHkxvwh+XlEX1VPdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations index cfbe8ada..c743778f 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations @@ -8,28 +8,10 @@ 'title': '140 fail, 6 pass in 14m 11s', 'summary': 
'\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' + ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' + '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -1935,28 +1917,10 @@ 'title': '140 fail, 6 pass in 14m 11s', 'summary': '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' + ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' + '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -3676,28 +3640,10 @@ 'title': '140 fail, 6 pass in 
14m 11s', 'summary': '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' + ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' + '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations index 64a49691..5c896458 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 3 tests pass in 17s', 'summary': - '3 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '17s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u200317s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfwUJl7GENS4EcEsUBnv7oKi0L2Zl8zJF1Cz5QNrKsatBxehJZg8CgdGh68npseFr0' 't7tF7KUmxwkKg/sQhQhZgRDb4GvU69MPPcw38tchaLnLek2XdwBO9idhX8ugG5zrfD3gA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations index d5dc87ef..61ded832 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations +++ 
b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLfwkFl7GEIS4kY9ZoDLe3RU1YDdvZvIOrsGowCfWNYyHBDFDT7AkFBG8I2wJaYj3NH' '55DklKKoZSbLDX71kLML+HQvT4XjC5z3fHWvdwsWWuZJlrl/TWQiR4Ewur4OcFmZnWM90' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations index a8d7eebf..24b4ced3 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 6 pass in 0s', 'summary': - '9 tests\u2002\u2003\u20036 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n3 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '9 tests\u2002\u2003\u20036 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n3 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MywqAIBREf' '0VctyiCoH5GxIwu+YirrqJ/72ZK7ubMDOfiGxgd+MKGjvGQIGYYCdaEMoJ3hD0hDfGd5p' 'pFSEpRMf3FAWf7FpsEU2xfoRE9lgsmV31vbHUf/7bMjSxz61LeWogEJbGwS34/WLAikt0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations index 0b7da2a7..635950f3 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 3s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - 
'[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20033s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHTRuXsYQlNgoYlqYjHe3KmrZ/ntt3q4dLCPpTjWV0pQgfjAkNBHCytgy8iGS3D0la/' 'NvFjNshXAGlkKMiAHZ1GwwrW/vmjL38F+7WcRuli0bvIfIkJeiyejjBNBleN/dAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations index 591f3874..2f7352b6 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 4 tests pass in 0s', 'summary': - '4 tests\u2002\u2003\u20034 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20034 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdRYeRlCUOJGPmaBynh3F8SI3byZyTu5BrMGPrOhYzwkiAVGgiWhjOAdYU9IQ8zT9G' 'YRklL/YoejfQstwfyKFdFjbTC515djq3v4sxVuZIVbl/LWQiSoiYVN8usGDjGDkd0AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations index 2e3d6903..da726189 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 
files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations index 6bb39b9e..73d4cc3e 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations index 8436a283..9feb4f9d 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations index 4976bb2b..9933a0ad 100644 --- 
a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations index 0872da7c..cc236ef7 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/trx/mstest/pickles.annotations b/python/test/files/trx/mstest/pickles.annotations index b8073193..f4440d86 100644 --- a/python/test/files/trx/mstest/pickles.annotations +++ b/python/test/files/trx/mstest/pickles.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - 
'[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLTRaeRlCUONGAbNAZby7Kx+hmzczeTff4Fwdn9nQMe4C+B+WgNKDNYQ9IQ3+m6aShQ' 'tKUTHW4oCrfYtNwpltqVgRLeYLBlN8X2x1iastciOL3LqU1Ro8QU7M7ZI/L5ec2abdAAA' diff --git a/python/test/files/trx/nunit/FluentValidation.Tests.annotations b/python/test/files/trx/nunit/FluentValidation.Tests.annotations index 53ae1dc3..f36872ff 100644 --- a/python/test/files/trx/nunit/FluentValidation.Tests.annotations +++ b/python/test/files/trx/nunit/FluentValidation.Tests.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 803 tests pass, 1 skipped in 3s', 'summary': - '804 tests\u2002\u2003\u2003803 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004\u205f\u2004' - '1 suites\u2003\u2003\u205f\u2004\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u2004\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '804 tests\u2002\u2003\u2003803 :heavy_check_mark:\u2003\u20033s ' + ':stopwatch:\n\u205f\u2004\u205f\u20041 suites\u2003\u2003\u205f\u2004\u205f\u2004' + '1 :zzz:\n\u205f\u2004\u205f\u20041 files\u2004\u2002\u2003\u2003\u205f\u2004\u205f\u2004' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dzDsEP1MiBUtacaqp+jf28yybm/mwex8AjN63rG6YtxHCC8MEVUAtxJKQjrCdbWiea' 'j3UeukZFELbDlwi0mBISFeMSI6zAbjWpoX/JO3KcXEn2Dib087ayEQ5MX8rPhxArdpBif' diff --git a/python/test/files/trx/nunit/NUnit-net461-sample.annotations b/python/test/files/trx/nunit/NUnit-net461-sample.annotations index adba4f2a..3a3f0698 100644 --- a/python/test/files/trx/nunit/NUnit-net461-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-net461-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit 
commit s.\n\n' + '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations index 03764a41..36b18ffa 100644 --- a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/SilentNotes.annotations b/python/test/files/trx/nunit/SilentNotes.annotations index fe9ec8ce..51410e69 100644 --- a/python/test/files/trx/nunit/SilentNotes.annotations +++ b/python/test/files/trx/nunit/SilentNotes.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 67 tests pass, 12 skipped in 0s', 'summary': - '79 tests\u2002\u2003\u200367 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200312 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '79 tests\u2002\u2003\u200367 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u200312 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCopcxBCVuRDELVMa7u/hB7HbmZWfnGszoeMeqgnEXwCcYAkoPdiUsCWnwcRLtC7' '0LSpFpxGdm2OJ7nYyWYPJCPyJafAyGNSXj/SveIgteIutdnOeUXRbwBM/F3CT5cQKN/0L' diff --git 
a/python/test/files/trx/xunit/dotnet-trx.annotations b/python/test/files/trx/xunit/dotnet-trx.annotations index 8d72da4d..21aa04b0 100644 --- a/python/test/files/trx/xunit/dotnet-trx.annotations +++ b/python/test/files/trx/xunit/dotnet-trx.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '5 fail, 1 skipped, 5 pass in 0s', 'summary': - '11 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u20035 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '11 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u205f\u20041 suites\u2003\u20031 :zzz:\n\u205f\u20041 ' + 'files\u2004\u2002\u2003\u20035 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLaSw8TKGIMaNfMwClfHuroCK3byZ3XfwBYwOfGSiYzwkiC/MCWUE7wh7QhpinsQDU0' 'hKUTN8xQZ7/S7FIsH8LjSix2rE5F7hnVtf4U+XubFlbmXKWwuRoCYWVsnPC2b3Tg/fAAA' diff --git a/python/test/files/trx/xunit/xUnit-net461-sample.annotations b/python/test/files/trx/xunit/xUnit-net461-sample.annotations index 8c80f059..1457b87b 100644 --- a/python/test/files/trx/xunit/xUnit-net461-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-net461-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations index cf741d59..55ec284d 100644 --- a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 
'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations index e1825e61..406cbb18 100644 --- a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations +++ b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations @@ -7,18 +7,9 @@ 'output': { 'title': '2 fail, 21 skipped, 2 pass in 26s', 'summary': - '25 tests\u2002\u2003\u2003\u205f\u20042 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '26s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200321 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20042 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' + '25 tests\u2002\u2003\u2003\u205f\u20042 :heavy_check_mark:\u2003\u2003' + '26s :stopwatch:\n\u205f\u20041 suites\u2003\u200321 :zzz:\n\u205f\u2004' + '1 files\u2004\u2002\u2003\u2003\u205f\u20042 :x:\n\nResults for ' 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLcRECy9jCErciGAWqIx3d/GD2O3M28zONZjJ856JinEfIWQYI8oAzhI2HTEtIW1N+8' diff --git a/python/test/files/xunit/mstest/fixie.annotations b/python/test/files/xunit/mstest/fixie.annotations index 99c5fb52..d88fe0fd 100644 --- a/python/test/files/xunit/mstest/fixie.annotations +++ b/python/test/files/xunit/mstest/fixie.annotations @@ -7,25 +7,10 @@ 'output': { 'title': '3 fail, 1 skipped, 1 pass in 8s', 'summary': - '1 files\u2004\u20032 suites\u2004\u2003\u20028s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n5 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n7 runs\u2006\u20033 ' 
- '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20032 suites\u2004\u2003\u20028s :stopwatch:\n5 tests\u2003' + '1 :heavy_check_mark:\u20031 :zzz:\u20033 :x:\n7 runs\u2006\u20033 ' + ':heavy_check_mark:\u20031 :zzz:\u20033 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QSRZcJsaKh0hh1Fd29Scxs998b5p18hm2yvGdVwbj14ALUBKNH6cBowo6QDu45Ne' '8erFcqPkaxwvETs4SNhEhiQjRIpiSDXj+9Ns43JxJ/tcBZLHDeUmbfwRHExewi+XUDvcl' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations index fba3c114..7c07f411 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XsYQhLiRj1mgMt5dQFDo5s1M3kUlKOHoQqaBUBfAf7AFZB6siThGjINP01zz6g' 'Ln5VuKA86ukAxUVwhEi0WIwVRfiq3u5d+WuZFlbl3cag0+QknE7YzeD2gV0DndAAAA\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations index 856c56a7..201d293b 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations @@ -7,25 +7,10 @@ 'output': { 'title': 'All 1 tests pass in 0s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n3 runs\u2006\u20031 ' - 
'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n1 tests\u2003' + '1 :heavy_check_mark:\u20030 :zzz:\u20030 :x:\n3 runs\u2006\u20031 ' + ':heavy_check_mark:\u20031 :zzz:\u20031 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XoYQhLiRj1mWynh3UZBgN29m8k5uwOrIFzYNjMcE1GBNKAmCzzhmzAN905tFTE' 'r9ix2O/i2MBPsrNGLA2mDyj2+usdcVLrbGVda4d6ngHFCGmljcJL9uwvXP6N0AAAA=\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations index e8e665fa..90cdc84a 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 4m 48s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4m 48s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20034m 48s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfw0xMsYgho3opgFKuPdXRAUuzezmzn5DHqyvGdNxbj14F4YPUoHZidshSBBJxeOXd' '6D9Uql7yRWOH5ilqB/YkI0SKYmg37PvTDL3MNfLXIRi1y2lNk2cARpMbtIft14m53n3wA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations index 2e91b4cb..c580ccc2 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '0 
tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations index 7d4e1989..61c20690 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 1m 32s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 32s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20031m 32s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igQ/1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawj7lpgef39d2q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMCmAJ3I94AAAA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations index a6308b2a..11da6003 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/xunit/mstest/pickles.annotations b/python/test/files/xunit/mstest/pickles.annotations index fa6b4065..04cdcc69 100644 
--- a/python/test/files/xunit/mstest/pickles.annotations +++ b/python/test/files/xunit/mstest/pickles.annotations @@ -7,25 +7,10 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n3 ' - 'tests\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n4 runs\u2006\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n3 tests\u2003' + '2 :heavy_check_mark:\u20030 :zzz:\u20031 :x:\n4 runs\u2006\u20033 ' + ':heavy_check_mark:\u20030 :zzz:\u20031 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLVSsvIwhqHEjH7NAZby7qwLSzZuZvJOvoBfPR9Y1jPsIocAcUQZwlrAlpCE8k8h58l' 'EpKvq/2OGo39MqQSfbVyyIDtMFo318Q4pZJwr/tpcr2cu1SzljIBCkxPwm+XUDYSIL8t0' diff --git a/python/test/test_publish.py b/python/test/test_publish.py index 9001f3c1..58dc97e1 100644 --- a/python/test/test_publish.py +++ b/python/test/test_publish.py @@ -4,7 +4,7 @@ import mock -from publish import __version__, Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ +from publish import Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ get_json_path, get_error_annotation, get_digest_from_stats, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, \ @@ -555,11 +555,11 @@ def test_get_short_summary(self): def test_label_md(self): self.assertEqual(all_tests_label_md, 'tests') - self.assertEqual(passed_tests_label_md, f'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")') - self.assertEqual(skipped_tests_label_md, f'[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")') - self.assertEqual(failed_tests_label_md, f'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")') - self.assertEqual(test_errors_label_md, f'[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")') - self.assertEqual(duration_label_md, f'[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")') + self.assertEqual(passed_tests_label_md, 
f':heavy_check_mark:') + self.assertEqual(skipped_tests_label_md, f':zzz:') + self.assertEqual(failed_tests_label_md, f':x:') + self.assertEqual(test_errors_label_md, f':fire:') + self.assertEqual(duration_label_md, f':stopwatch:') def test_get_short_summary_md(self): self.assertEqual(get_short_summary_md(UnitTestRunResults( @@ -816,7 +816,7 @@ def test_get_long_summary_without_runs_md_with_errors(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8, runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8, commit='commit' - )), (f'4 {all_tests_label_md}   5 {passed_tests_label_md}  3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + )), (f'4 {all_tests_label_md}   5 {passed_tests_label_md}  3s :stopwatch:\n' f'2 suites  6 {skipped_tests_label_md}\n' f'1 files    7 {failed_tests_label_md}  8 {test_errors_label_md}\n' f'\n' @@ -2090,7 +2090,7 @@ def test_file_without_cases(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s :stopwatch:\n' f'1 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'\n' @@ -2101,7 +2101,7 @@ def test_file_without_cases_but_with_tests(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'6 {all_tests_label_md}   3 {passed_tests_label_md}  0s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'6 {all_tests_label_md}   3 {passed_tests_label_md}  0s :stopwatch:\n' f'1 suites  2 {skipped_tests_label_md}\n' f'1 files    1 {failed_tests_label_md}\n' f'\n' @@ -2112,7 +2112,7 @@ def test_non_parsable_file(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s :stopwatch:\n' f'0 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'1 errors\n' @@ -2124,7 +2124,7 @@ def test_files_with_testsuite_in_testsuite(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'5 {all_tests_label_md}   5 {passed_tests_label_md}  4s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'5 {all_tests_label_md}   5 {passed_tests_label_md}  4s :stopwatch:\n' f'4 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'\n' diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index fba4f0fc..e4db3392 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -672,16 +672,16 @@ def test_publish_comment_compare_earlier_with_restricted_unicode(self): 
self.assertEqual(('## title\n' '\u205f\u20041 files\u2004 ±\u205f\u20040\u2002\u2003' '2 suites\u2004 +1\u2002\u2003\u2002' - f'3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests") +2s\n' + f'3s :stopwatch: +2s\n' '22 tests +19\u2002\u2003' - f'4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") +3\u2002\u2003' - f'5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +3\u2002\u2003\u205f\u2004' - f'6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +\u205f\u20046\u2002\u2003\u205f\u2004' - f'7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +\u205f\u20047\u2002\n' - f'38 runs\u2006 +35\u2002\u20038 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") +7\u2002\u2003' - f'9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +7\u2002\u2003' - f'10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +10\u2002\u2003' - f'11 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +11\u2002\n' + f'4 :heavy_check_mark: +3\u2002\u2003' + f'5 :zzz: +3\u2002\u2003\u205f\u2004' + f'6 :x: +\u205f\u20046\u2002\u2003\u205f\u2004' + f'7 :fire: +\u205f\u20047\u2002\n' + f'38 runs\u2006 +35\u2002\u20038 :heavy_check_mark: +7\u2002\u2003' + f'9 :zzz: +7\u2002\u2003' + f'10 :x: +10\u2002\u2003' + f'11 :fire: +11\u2002\n' '\n' 'For more details on these failures and errors, see [this check](html://url).\n' '\n' @@ -1334,7 +1334,7 @@ def do_test_publish_check_without_base_stats(self, errors: List[ParseError], ann 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  {summary_errors}2 suites   3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n22 tests 4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")   6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")   7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n38 runs  8 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 11 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test 
errors\\")\\n\\nResults for commit commit.\\n", ' + f'"summary": "  1 files  {summary_errors}2 suites   3s :stopwatch:\\n22 tests 4 :heavy_check_mark: 5 :zzz:   6 :x:   7 :fire:\\n38 runs  8 :heavy_check_mark: 9 :zzz: 10 :x: 11 :fire:\\n\\nResults for commit commit.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' f'"annotations": {len(annotations)}, ' @@ -1406,7 +1406,7 @@ def do_test_publish_check_with_base_stats(self, errors: List[ParseError]): 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  ±0  {summary_errors}2 suites  ±0   3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\") ±0s\\n22 tests +1  4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")  -   8  5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") +1    6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") +4    7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\") +  4 \\n38 runs  +1  8 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")  - 17  9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") +2  10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") +6  11 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\") +10 \\n\\nResults for commit commit. ± Comparison against earlier commit past.\\n", ' + f'"summary": "  1 files  ±0  {summary_errors}2 suites  ±0   3s :stopwatch: ±0s\\n22 tests +1  4 :heavy_check_mark:  -   8  5 :zzz: +1    6 :x: +4    7 :fire: +  4 \\n38 runs  +1  8 :heavy_check_mark:  - 17  9 :zzz: +2  10 :x: +6  11 :fire: +10 \\n\\nResults for commit commit. 
± Comparison against earlier commit past.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' '"stats_with_delta": {"files": {"number": 1, "delta": 0}, ' + f'"errors": {len(errors)}, ' + '"suites": {"number": 2, "delta": 0}, "duration": {"duration": 3, "delta": 0}, "tests": {"number": 22, "delta": 1}, "tests_succ": {"number": 4, "delta": -8}, "tests_skip": {"number": 5, "delta": 1}, "tests_fail": {"number": 6, "delta": 4}, "tests_error": {"number": 7, "delta": 4}, "runs": {"number": 38, "delta": 1}, "runs_succ": {"number": 8, "delta": -17}, "runs_skip": {"number": 9, "delta": 2}, "runs_fail": {"number": 10, "delta": 6}, "runs_error": {"number": 11, "delta": 10}, "commit": "commit", "reference_type": "earlier", "reference_commit": "past"}, ' @@ -1660,9 +1660,9 @@ def test_publish_check_with_suite_details(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1696,9 +1696,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests   1 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 2 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 3 
[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 1 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs  -12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 4 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 5 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 6 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files    1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests   1 :heavy_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :heavy_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1732,9 +1732,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1783,9 +1783,9 @@ def test_publish_check_with_cases(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'7 tests\u2003\u205f\u20041 
[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1820,9 +1820,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests   1 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 2 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 3 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 1 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs  -12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 4 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 5 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 6 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files    1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests   1 :heavy_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :heavy_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1906,9 +1906,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all 
tests\\")\\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -2250,9 +2250,9 @@ def test_publish_job_summary_without_before(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'22 tests\u20034 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20035 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u2003\u205f\u20046 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u2003\u205f\u20047 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'38 runs\u2006\u20038 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20039 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u200310 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u200311 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s :stopwatch:\n' + f'22 tests\u20034 :heavy_check_mark:\u20035 :zzz:\u2003\u205f\u20046 :x:\u2003\u205f\u20047 :fire:\n' + f'38 runs\u2006\u20038 :heavy_check_mark:\u20039 :zzz:\u200310 :x:\u200311 :fire:\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2281,9 +2281,9 @@ def test_publish_job_summary_with_before(self): 
(method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests") -1s\n' - f'22 tests +2\u2002\u20034 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") \u2006-\u200a1\u2002\u20035 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +1\u2002\u2003\u205f\u20046 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +1\u2002\u2003\u205f\u20047 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +1\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") \u2006-\u200a2\u2002\u20039 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") ±0\u2002\u200310 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +2\u2002\u200311 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +4\u2002\n' + f'\u205f\u20041 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s :stopwatch: -1s\n' + f'22 tests +2\u2002\u20034 :heavy_check_mark: \u2006-\u200a1\u2002\u20035 :zzz: +1\u2002\u2003\u205f\u20046 :x: +1\u2002\u2003\u205f\u20047 :fire: +1\u2002\n' + f'38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a2\u2002\u20039 :zzz: ±0\u2002\u200310 :x: +2\u2002\u200311 :fire: +4\u2002\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' From 3cd0197dd8230c0c911835a11e591ce38092cc94 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Mon, 18 Dec 2023 11:07:51 +0100 Subject: [PATCH 22/28] Improve emoji for passed tests (#542) * Improve emoji for passed tests * Amend symbol table in README.md * Adjust digit space and runs label --- README.md | 20 +-- python/publish/__init__.py | 6 +- python/test/files/dart/json/tests.annotations | 8 +- .../junit-xml/bazel/suite-logs.annotations | 2 +- .../junit-xml/jest/jest-junit.annotations | 2 +- .../junit-xml/junit.multiresult.annotations | 4 +- .../junit-xml/minimal-attributes.annotations | 2 +- .../mocha/latex-utensils.annotations | 9 +- .../files/junit-xml/no-attributes.annotations | 2 +- .../junit-xml/no-cases-but-tests.annotations | 2 +- .../test/files/junit-xml/no-cases.annotations | 2 +- .../files/junit-xml/non-junit.annotations | 2 +- .../junit-xml/pytest/junit.fail.annotations | 2 +- .../pytest/junit.gloo.elastic.annotations | 7 +- .../junit.gloo.elastic.spark.tf.annotations | 7 +- ...junit.gloo.elastic.spark.torch.annotations | 7 +- .../pytest/junit.gloo.standalone.annotations | 7 +- .../pytest/junit.gloo.static.annotations | 7 +- .../pytest/junit.mpi.integration.annotations | 2 +- .../pytest/junit.mpi.standalone.annotations | 7 +- .../pytest/junit.mpi.static.annotations | 7 +- .../junit.spark.integration.1.annotations | 7 +- 
.../junit.spark.integration.2.annotations | 7 +- ...ch.spark.diff.DiffOptionsSuite.annotations | 2 +- .../testsuite-in-testsuite.annotations | 2 +- .../junit-xml/testsuite-root.annotations | 2 +- .../files/junit-xml/tst/disabled.annotations | 9 +- .../junit-xml/unsupported-unicode.annotations | 2 +- .../junit-xml/with-xml-entities.annotations | 2 +- .../files/junit-xml/xunit/xunit.annotations | 2 +- python/test/files/mocha/tests.annotations | 2 +- .../nunit/mstest/clicketyclackety.annotations | 6 +- .../files/nunit/mstest/pickles.annotations | 2 +- .../nunit/mstest/timewarpinc.annotations | 2 +- .../nunit3/jenkins/NUnit-correct.annotations | 7 +- .../nunit3/jenkins/NUnit-correct2.annotations | 6 +- .../nunit3/jenkins/NUnit-correct3.annotations | 7 +- .../nunit3/jenkins/NUnit-failure.annotations | 2 +- .../jenkins/NUnit-healthReport.annotations | 6 +- .../nunit3/jenkins/NUnit-ignored.annotations | 2 +- .../jenkins/NUnit-issue1077.annotations | 2 +- .../jenkins/NUnit-issue33493.annotations | 4 +- .../jenkins/NUnit-issue44527.annotations | 24 ++-- .../jenkins/NUnit-issue48478.annotations | 2 +- .../jenkins/NUnit-issue50162.annotations | 2 +- .../jenkins/NUnit-issue5674.annotations | 2 +- .../jenkins/NUnit-issue6353.annotations | 2 +- .../jenkins/NUnit-multinamespace.annotations | 2 +- .../jenkins/NUnit-sec1752-file.annotations | 2 +- .../jenkins/NUnit-sec1752-https.annotations | 2 +- .../nunit3/jenkins/NUnit-simple.annotations | 2 +- .../nunit/nunit3/jenkins/NUnit.annotations | 2 +- .../nunit3/jenkins/NUnitUnicode.annotations | 2 +- .../test/files/trx/mstest/pickles.annotations | 2 +- .../nunit/FluentValidation.Tests.annotations | 8 +- .../trx/nunit/NUnit-net461-sample.annotations | 2 +- .../NUnit-netcoreapp3.1-sample.annotations | 2 +- .../files/trx/nunit/SilentNotes.annotations | 7 +- .../files/trx/xunit/dotnet-trx.annotations | 6 +- .../trx/xunit/xUnit-net461-sample.annotations | 2 +- .../xUnit-netcoreapp3.1-sample.annotations | 2 +- ...mi_YAMILEX 2015-10-24 04_18_59.annotations | 7 +- .../test/files/xunit/mstest/fixie.annotations | 4 +- .../mstest/jenkinsci/testcase1.annotations | 2 +- .../mstest/jenkinsci/testcase2.annotations | 4 +- .../mstest/jenkinsci/testcase3.annotations | 2 +- .../mstest/jenkinsci/testcase4.annotations | 2 +- .../mstest/jenkinsci/testcase5.annotations | 2 +- .../mstest/jenkinsci/testcase6.annotations | 2 +- .../files/xunit/mstest/pickles.annotations | 4 +- python/test/test_publish.py | 128 +++++++++--------- python/test/test_publisher.py | 126 ++++++++--------- 72 files changed, 270 insertions(+), 285 deletions(-) diff --git a/README.md b/README.md index ac5942b4..5cfb54b2 100644 --- a/README.md +++ b/README.md @@ -94,14 +94,14 @@ Check your favorite development and test environment for its JSON, TRX file or J |Test Environment |Language| JUnit
XML | NUnit<br/>XML | XUnit<br/>XML | TRX<br/>file | JSON
file | |-----------------|:------:|:---------:|:---------:|:---------:|:---:|:---:| -|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | | :heavy_check_mark: | -|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:heavy_check_mark:| | | | | -|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:heavy_check_mark:| | | | | -|[Mocha](https://mochajs.org/#xunit)|JavaScript|:heavy_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| | :heavy_check_mark: | -|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:heavy_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | -|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:heavy_check_mark:| | | | | -|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:heavy_check_mark:| | | | | -|Your favorite
environment|Your favorite<br/>language|probably
:heavy_check_mark:| | | | | +|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | |:white_check_mark:| +|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:white_check_mark:| | | | | +|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:white_check_mark:| | | | | +|[Mocha](https://mochajs.org/#xunit)|JavaScript|:white_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| |:white_check_mark:| +|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:white_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:white_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | +|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:white_check_mark:| | | | | +|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:white_check_mark:| | | | | +|Your favorite
environment|Your favorite<br/>language|probably
:white_check_mark:| | | | | ## What is new in version 2 @@ -236,7 +236,7 @@ The symbols have the following meaning: |Symbol|Meaning| |:----:|-------| -||A successful test or run| +|  :white_check_mark:|A successful test or run| ||A skipped test or run| ||A failed test or run| ||An erroneous test or run| @@ -344,7 +344,7 @@ Here is an example JSON: ```json { "title": "4 parse errors, 4 errors, 23 fail, 18 skipped, 227 pass in 39m 12s", - "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. ± Comparison against earlier commit d8ce4b6c.\n", + "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:white_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:white_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. 
± Comparison against earlier commit d8ce4b6c.\n", "conclusion": "success", "stats": { "files": 24, diff --git a/python/publish/__init__.py b/python/publish/__init__.py index e1ff4564..a596b867 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -18,7 +18,7 @@ digest_mime_type = 'application/gzip' digest_encoding = 'base64' digest_header = f'{digest_prefix}{digest_mime_type};{digest_encoding},' -digit_space = '  ' +digit_space = ' ' punctuation_space = ' ' comment_mode_off = 'off' @@ -446,7 +446,7 @@ def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str: all_tests_label_md = 'tests' -passed_tests_label_md = ':heavy_check_mark:' +passed_tests_label_md = ':white_check_mark:' skipped_tests_label_md = ':zzz:' failed_tests_label_md = ':x:' test_errors_label_md = ':fire:' @@ -637,7 +637,7 @@ def get_long_summary_with_runs_md(stats: UnitTestRunResultsOrDeltaResults, runs_error=as_stat_number(stats.runs_error, error_digits, error_delta_digits, test_errors_label_md) ) if get_magnitude(stats.runs_error) else '' runs_line = '{runs} {runs_succ} {runs_skip} {runs_fail}{runs_error_part}\n'.format( - runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), + runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), runs_succ=as_stat_number(stats.runs_succ, success_digits, success_delta_digits, passed_tests_label_md), runs_skip=as_stat_number(stats.runs_skip, skip_digits, skip_delta_digits, skipped_tests_label_md), runs_fail=as_stat_number(stats.runs_fail, fail_digits, fail_delta_digits, failed_tests_label_md), diff --git a/python/test/files/dart/json/tests.annotations b/python/test/files/dart/json/tests.annotations index 63bc9f36..bcb21cb3 100644 --- a/python/test/files/dart/json/tests.annotations +++ b/python/test/files/dart/json/tests.annotations @@ -7,10 +7,10 @@ 'output': { 'title': '2 errors, 1 fail, 1 skipped, 16 pass in 0s', 'summary': - '20 tests\u2002\u2003\u200316 :heavy_check_mark:\u2003\u20030s ' - ':stopwatch:\n\u205f\u20044 suites\u2003\u2003\u205f\u20041 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 :x:\u2003\u20032 :fire:\n' - '\nResults for commit commit s.\n\n' + '20 tests\u2002\u2003\u200316 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20074 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20071 :x:\u2003\u20032 :fire:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REiy4TYkVDpTHqKrp74w+au3nvY97DNzhXwyfWNYwbBzbAQLA4FBa0ImwJabB+6j' 'PMxknpP8diDrhTK4pNwFmJFVGjz5BBp3LR31UwitIL/MsF/tekvi6wBOliZhf8/QAMgVR' diff --git a/python/test/files/junit-xml/bazel/suite-logs.annotations b/python/test/files/junit-xml/bazel/suite-logs.annotations index c4d7686e..61324679 100644 --- a/python/test/files/junit-xml/bazel/suite-logs.annotations +++ b/python/test/files/junit-xml/bazel/suite-logs.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 errors in 0s', 'summary': - '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations index e6410a28..e032cabf 100644 --- 
a/python/test/files/junit-xml/jest/jest-junit.annotations +++ b/python/test/files/junit-xml/jest/jest-junit.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations index 2eac018c..900001bf 100644 --- a/python/test/files/junit-xml/junit.multiresult.annotations +++ b/python/test/files/junit-xml/junit.multiresult.annotations @@ -8,8 +8,8 @@ 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 1s', 'summary': '1 files\u2004\u20031 suites\u2004\u2003\u20021s :stopwatch:\n4 tests\u2003' - '1 :heavy_check_mark:\u20031 :zzz:\u20031 :x:\u20031 :fire:\n4 runs\u2006\u2003' - '-2 :heavy_check_mark:\u20033 :zzz:\u20032 :x:\u20031 :fire:\n\n' + '1 :white_check_mark:\u20031 :zzz:\u20031 :x:\u20031 :fire:\n4 runs\u200a\u2003' + '-2 :white_check_mark:\u20033 :zzz:\u20032 :x:\u20031 :fire:\n\n' 'Results for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KotfBTeRlCEONGPmYXKuPdlQhEujdvkrn4BkYTX9jQMU4RQoU1ogzgXcZXhKTmsgVFpf' diff --git a/python/test/files/junit-xml/minimal-attributes.annotations b/python/test/files/junit-xml/minimal-attributes.annotations index 73faacfa..22e1f1f9 100644 --- a/python/test/files/junit-xml/minimal-attributes.annotations +++ b/python/test/files/junit-xml/minimal-attributes.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations b/python/test/files/junit-xml/mocha/latex-utensils.annotations index d698a0db..fe8ae8dc 100644 --- a/python/test/files/junit-xml/mocha/latex-utensils.annotations +++ b/python/test/files/junit-xml/mocha/latex-utensils.annotations @@ -7,11 +7,10 @@ 'output': { 'title': 'All 101 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 ' - 'suites\u2004\u2003\u20020s :stopwatch:\n101 tests\u2003101 ' - ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n109 runs\u2006\u2003109 ' - ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' - 'commit s.\n\n' + '\u2007\u20071 files\u2004\u2003\u2007\u20071 suites\u2004\u2003\u2002' + '0s :stopwatch:\n101 tests\u2003101 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n109 runs\u200a\u2003109 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv' '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK' 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n', diff --git a/python/test/files/junit-xml/no-attributes.annotations b/python/test/files/junit-xml/no-attributes.annotations index 29db427c..b06aa7a9 100644 --- 
a/python/test/files/junit-xml/no-attributes.annotations +++ b/python/test/files/junit-xml/no-attributes.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations index d2f513be..0f39fd65 100644 --- a/python/test/files/junit-xml/no-cases-but-tests.annotations +++ b/python/test/files/junit-xml/no-cases-but-tests.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 2 skipped, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations index c580ccc2..8ce031e6 100644 --- a/python/test/files/junit-xml/no-cases.annotations +++ b/python/test/files/junit-xml/no-cases.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/junit-xml/non-junit.annotations b/python/test/files/junit-xml/non-junit.annotations index 617eadba..28635d0b 100644 --- a/python/test/files/junit-xml/non-junit.annotations +++ b/python/test/files/junit-xml/non-junit.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 parse errors', 'summary': - '0 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n0 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n1 errors\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBBFf' diff --git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations index 103fcee0..969db188 100644 --- a/python/test/files/junit-xml/pytest/junit.fail.annotations +++ b/python/test/files/junit-xml/pytest/junit.fail.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 skipped, 3 pass in 2s', 'summary': - '5 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20032s ' + '5 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20032s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations index b0f0e449..d8c15b5c 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations +++ 
b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 10 tests pass, 4 skipped in 1m 12s', 'summary': - '14 tests\u2002\u2003\u200310 :heavy_check_mark:\u2003\u20031m 12s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20044 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '14 tests\u2002\u2003\u200310 :white_check_mark:\u2003\u20031m 12s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20074 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd' 'EFKaOpi1lhJ1NeZgGaRPlQiBazwbC9xXj/grcovcSfXOJvTVpjwBPki7lF8PMCyjZFT+I' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations index ee663b01..19251c83 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 20 tests pass, 2 skipped in 10m 27s', 'summary': - '22 tests\u2002\u2003\u200320 :heavy_check_mark:\u2003\u200310m 27s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200320 :white_check_mark:\u2003\u200310m 27s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC' '8MLigVTV3MCns0WcwSNhLlY0K0+BgMJhfj/QveovQSf3KJvzVltQZP8FzMLZKfF82Ojyn' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations index ebbc888c..ed382cfd 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 11m 10s', 'summary': - '22 tests\u2002\u2003\u200322 :heavy_check_mark:\u2003\u200311m 10s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u200311m 10s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN' 'cP9MZJ+TMz7GTSf68ELJkYETVGg25LRX9nwVu8vcCfXOBvTep1BUsQL2Ymwc8LUe9HxOM' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations index 571c1237..d122405b 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 80 tests pass, 17 skipped in 3m 25s', 'summary': - '97 
tests\u2002\u2003\u200380 :heavy_check_mark:\u2003\u20033m 25s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u200317 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' - 'commit s.\n\n' + '97 tests\u2002\u2003\u200380 :white_check_mark:\u2003\u20033m 25s ' + ':stopwatch:\n\u20071 suites\u2003\u200317 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv' 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations index 6d50a75f..951ee896 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 12 tests pass, 12 skipped in 1m 9s', 'summary': - '24 tests\u2002\u2003\u200312 :heavy_check_mark:\u2003\u20031m 9s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u200312 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' - 'commit s.\n\n' + '24 tests\u2002\u2003\u200312 :white_check_mark:\u2003\u20031m 9s ' + ':stopwatch:\n\u20071 suites\u2003\u200312 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o' 'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations index 6a310f1a..bd91c7c4 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 3 tests pass in 15s', 'summary': - '3 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u200315s ' + '3 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u200315s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations index 6130ffd4..e217dc05 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 96 tests pass, 1 skipped in 3m 39s', 'summary': - '97 tests\u2002\u2003\u200396 :heavy_check_mark:\u2003\u20033m 39s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20041 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '97 tests\u2002\u2003\u200396 :white_check_mark:\u2003\u20033m 39s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf' 
'vCYL2UwTSfmWGPgUdoAQuJMgmFaDAa9Fsqhv0LPuLr3Zzlbs5r0qwrOIK4mJ0EPy/3HdY' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations index 97fca517..88026b3c 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 24 tests pass in 2m 4s', 'summary': - '24 tests\u2002\u2003\u200324 :heavy_check_mark:\u2003\u20032m 4s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '24 tests\u2002\u2003\u200324 :white_check_mark:\u2003\u20032m 4s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr' 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK' 'xX6mcWPNjUUUwS10JoIkOfIb/HYthF8BWp93CWezivKbNt6Bi+Jews4boBWo1x8eMAAAA' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations index 2194bc52..309d9557 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 45s', 'summary': - '35 tests\u2002\u2003\u200333 :heavy_check_mark:\u2003\u20032m 45s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 45s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR' 'hC0jqb5jULbGRqFkbBSqJkMSF6fAwmx8W8f8FbvL2LP7mLvzXtrYVI8CwRZiWPEwEjqVj' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations index 3f4e6053..8f977394 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 52s', 'summary': - '35 tests\u2002\u2003\u200333 :heavy_check_mark:\u2003\u20032m 52s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u2003\u205f\u20042 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for ' - 'commit commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 52s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr' '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf' '2CcsGYaKpkJtjI8L8aNMwkchY9osXHYFi5GO9f8Bapd/End/G3ZuyygCd4LuFGLY8TfGY' diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations 
b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations index a875e388..78adb578 100644 --- a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 5 tests pass in 2s', 'summary': - '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20032s ' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20032s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations index f14f9aff..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-in-testsuite.annotations +++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20034s ' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations index f14f9aff..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-root.annotations +++ b/python/test/files/junit-xml/testsuite-root.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20034s ' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations index 1812a95a..6bc432e1 100644 --- a/python/test/files/junit-xml/tst/disabled.annotations +++ b/python/test/files/junit-xml/tst/disabled.annotations @@ -7,11 +7,10 @@ 'output': { 'title': '1 errors, 19 fail, 5 skipped, 6 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20042 suites\u2004\u2003\u2002' - '0s :stopwatch:\n31 tests\u2003\u205f\u20046 :heavy_check_mark:\u2003' - '5 :zzz:\u200319 :x:\u20031 :fire:\n31 runs\u2006\u200311 ' - ':heavy_check_mark:\u20030 :zzz:\u200319 :x:\u20031 :fire:\n\nResults ' - 'for commit commit s.\n\n' + '\u20071 files\u2004\u2003\u20072 suites\u2004\u2003\u20020s ' + ':stopwatch:\n31 tests\u2003\u20076 :white_check_mark:\u20035 :zzz:\u2003' + '19 :x:\u20031 :fire:\n31 runs\u200a\u200311 :white_check_mark:\u2003' + '0 :zzz:\u200319 :x:\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NQQqAIBBFr' 'yKuW2RRUJcJsaIhyxh1Fd29sdLczXsf3px8Bj1Z3jNRMG49uAcqgtGjdGB2wpKQBhemWk' 'QYrFeKTPuLFQ4STRKzBB3aXTITosHvHfo9FcMdg+IXb7CMnPcekeeU2TZwBN/F7CL5dQP' diff --git a/python/test/files/junit-xml/unsupported-unicode.annotations b/python/test/files/junit-xml/unsupported-unicode.annotations index 2fe151ac..5953a144 100644 --- a/python/test/files/junit-xml/unsupported-unicode.annotations +++ 
b/python/test/files/junit-xml/unsupported-unicode.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '2 errors, 2 fail, 2 skipped, 1 pass in 8s', 'summary': - '7 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20038s ' + '7 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20038s ' ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' '2 :x:\u2003\u20032 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' diff --git a/python/test/files/junit-xml/with-xml-entities.annotations b/python/test/files/junit-xml/with-xml-entities.annotations index 1bddc789..bc1a93f4 100644 --- a/python/test/files/junit-xml/with-xml-entities.annotations +++ b/python/test/files/junit-xml/with-xml-entities.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 errors, 1 fail, 2 skipped in 0s', 'summary': - '4 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ5AMBBFr' diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations index 1d6aa0d5..7aeb7110 100644 --- a/python/test/files/junit-xml/xunit/xunit.annotations +++ b/python/test/files/junit-xml/xunit/xunit.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/mocha/tests.annotations b/python/test/files/mocha/tests.annotations index 120e7589..9d7ae538 100644 --- a/python/test/files/mocha/tests.annotations +++ b/python/test/files/mocha/tests.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 2 pass in 12s', 'summary': - '5 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u200312s ' + '5 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u200312s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations index 4c6a0c96..cf20a207 100644 --- a/python/test/files/nunit/mstest/clicketyclackety.annotations +++ b/python/test/files/nunit/mstest/clicketyclackety.annotations @@ -7,9 +7,9 @@ 'output': { 'title': '10 fail, 12 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20048 suites\u2004\u2003\u2002' - '0s :stopwatch:\n22 tests\u200312 :heavy_check_mark:\u20030 :zzz:\u2003' - '10 :x:\n23 runs\u2006\u200313 :heavy_check_mark:\u20030 :zzz:\u2003' + '\u20071 files\u2004\u2003\u20078 suites\u2004\u2003\u20020s ' + ':stopwatch:\n22 tests\u200312 :white_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n23 runs\u200a\u200313 :white_check_mark:\u20030 :zzz:\u2003' '10 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv' diff --git 
a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations index c958f5a0..e05c2074 100644 --- a/python/test/files/nunit/mstest/pickles.annotations +++ b/python/test/files/nunit/mstest/pickles.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations index 8f3fb865..dec20d71 100644 --- a/python/test/files/nunit/mstest/timewarpinc.annotations +++ b/python/test/files/nunit/mstest/timewarpinc.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail in 2s', 'summary': - '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20032s ' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20032s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations index b627f2ec..8b60e0ad 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations @@ -7,10 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 8 skipped, 18 pass in 0s', 'summary': - '28 tests\u2002\u2003\u200318 :heavy_check_mark:\u2003\u20030s ' - ':stopwatch:\n11 suites\u2003\u2003\u205f\u20048 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u2003\u205f\u20041 :x:\u2003\u20031 :fire:\n\n' - 'Results for commit commit s.\n\n' + '28 tests\u2002\u2003\u200318 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n11 suites\u2003\u2003\u20078 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHdTJeBlDUGKjiGlhMt7dIqi49b3fvEMaWCeSvWgqISmATxBpDKg8uI25ZuTFx63tHh' 'goaB2/C7PAzuYTRsGa60lMiA6zwbC9xXj/gkl8vZuL3M1lTTtrwTPkS9Cs5HkBSPFg+uI' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations index f7e67371..f8b19140 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations @@ -7,9 +7,9 @@ 'output': { 'title': 'All 183 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002' - '0s :stopwatch:\n183 tests\u2003183 :heavy_check_mark:\u20030 :zzz:\u2003' - '0 :x:\n218 runs\u2006\u2003218 :heavy_check_mark:\u20030 :zzz:\u2003' + '\u2007\u20071 files\u2004\u2003102 suites\u2004\u2003\u20020s ' + ':stopwatch:\n183 tests\u2003183 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n218 runs\u200a\u2003218 :white_check_mark:\u20030 :zzz:\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr' '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations 
b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations index 6493e048..f678db2a 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 4m 24s', 'summary': - '22 tests\u2002\u2003\u200322 :heavy_check_mark:\u2003\u20034m 24s ' - ':stopwatch:\n13 suites\u2003\u2003\u205f\u20040 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' - 'commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u20034m 24s ' + ':stopwatch:\n13 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV' 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations index 66dc0373..ca0c972e 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '3 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations index 9368cf8e..0a746420 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations @@ -7,9 +7,9 @@ 'output': { 'title': '1 fail, 9 pass in 1s', 'summary': - '10 tests\u2002\u2003\u20039 :heavy_check_mark:\u2003\u20031s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u20030 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u20031 :x:\n\nResults for commit commit s.\n\n' + '10 tests\u2002\u2003\u20039 :white_check_mark:\u2003\u20031s ' + ':stopwatch:\n\u20071 suites\u2003\u20030 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv' '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys' 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations index f42a6d45..e9665178 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 1 tests pass, 2 skipped in 0s', 'summary': - '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations 
b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations index c83c9ccb..dd42640d 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 6 tests pass in 35s', 'summary': - '6 tests\u2002\u2003\u20036 :heavy_check_mark:\u2003\u200335s ' + '6 tests\u2002\u2003\u20036 :white_check_mark:\u2003\u200335s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations index 5eb519c2..4093bcc2 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations @@ -8,8 +8,8 @@ 'title': 'All 1 tests pass, 1 skipped in 6s', 'summary': '1 files\u2004\u20032 suites\u2004\u2003\u20026s :stopwatch:\n2 tests\u2003' - '1 :heavy_check_mark:\u20031 :zzz:\u20030 :x:\n2 runs\u2006\u20032 ' - ':heavy_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' + '1 :white_check_mark:\u20031 :zzz:\u20030 :x:\n2 runs\u200a\u20032 ' + ':white_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations index c743778f..bacfefec 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations @@ -7,10 +7,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' - ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' @@ -1916,10 +1916,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' - ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' 
'0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' @@ -3639,10 +3639,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s :stopwatch:\n146 tests\u2003\u205f\u2004\u205f\u20046 ' - ':heavy_check_mark:\u20030 :zzz:\u2003140 :x:\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 :heavy_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations index 5c896458..c4391213 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 3 tests pass in 17s', 'summary': - '3 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u200317s ' + '3 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u200317s ' ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations index 61ded832..c336d35c 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations index 24b4ced3..fa07d95f 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 6 pass in 0s', 'summary': - '9 tests\u2002\u2003\u20036 :heavy_check_mark:\u2003\u20030s ' + '9 tests\u2002\u2003\u20036 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n3 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MywqAIBREf' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations index 635950f3..2d34ba36 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 3s', 'summary': - '3 tests\u2002\u2003\u20031 
:heavy_check_mark:\u2003\u20033s ' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20033s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations index 2f7352b6..2c8cf7c5 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 4 tests pass in 0s', 'summary': - '4 tests\u2002\u2003\u20034 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20034 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations index da726189..4ca4fd85 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '2 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations index 73d4cc3e..b5614f87 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '2 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations index 9feb4f9d..bf971b3b 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations index 9933a0ad..2e752f71 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 3 
pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations index cc236ef7..fcdfc2a4 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' diff --git a/python/test/files/trx/mstest/pickles.annotations b/python/test/files/trx/mstest/pickles.annotations index f4440d86..684ca48f 100644 --- a/python/test/files/trx/mstest/pickles.annotations +++ b/python/test/files/trx/mstest/pickles.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 :heavy_check_mark:\u2003\u20030s ' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' diff --git a/python/test/files/trx/nunit/FluentValidation.Tests.annotations b/python/test/files/trx/nunit/FluentValidation.Tests.annotations index f36872ff..025b4e12 100644 --- a/python/test/files/trx/nunit/FluentValidation.Tests.annotations +++ b/python/test/files/trx/nunit/FluentValidation.Tests.annotations @@ -7,10 +7,10 @@ 'output': { 'title': 'All 803 tests pass, 1 skipped in 3s', 'summary': - '804 tests\u2002\u2003\u2003803 :heavy_check_mark:\u2003\u20033s ' - ':stopwatch:\n\u205f\u2004\u205f\u20041 suites\u2003\u2003\u205f\u2004\u205f\u2004' - '1 :zzz:\n\u205f\u2004\u205f\u20041 files\u2004\u2002\u2003\u2003\u205f\u2004\u205f\u2004' - '0 :x:\n\nResults for commit commit s.\n\n' + '804 tests\u2002\u2003\u2003803 :white_check_mark:\u2003\u20033s ' + ':stopwatch:\n\u2007\u20071 suites\u2003\u2003\u2007\u20071 :zzz:\n\u2007\u2007' + '1 files\u2004\u2002\u2003\u2003\u2007\u20070 :x:\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dzDsEP1MiBUtacaqp+jf28yybm/mwex8AjN63rG6YtxHCC8MEVUAtxJKQjrCdbWiea' 'j3UeukZFELbDlwi0mBISFeMSI6zAbjWpoX/JO3KcXEn2Dib087ayEQ5MX8rPhxArdpBif' diff --git a/python/test/files/trx/nunit/NUnit-net461-sample.annotations b/python/test/files/trx/nunit/NUnit-net461-sample.annotations index 3a3f0698..700f6de8 100644 --- a/python/test/files/trx/nunit/NUnit-net461-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-net461-sample.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' 
'[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' diff --git a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations index 36b18ffa..9543c8dc 100644 --- a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' diff --git a/python/test/files/trx/nunit/SilentNotes.annotations b/python/test/files/trx/nunit/SilentNotes.annotations index 51410e69..46cef341 100644 --- a/python/test/files/trx/nunit/SilentNotes.annotations +++ b/python/test/files/trx/nunit/SilentNotes.annotations @@ -7,10 +7,9 @@ 'output': { 'title': 'All 67 tests pass, 12 skipped in 0s', 'summary': - '79 tests\u2002\u2003\u200367 :heavy_check_mark:\u2003\u20030s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u200312 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u2003\u205f\u20040 :x:\n\nResults for commit ' - 'commit s.\n\n' + '79 tests\u2002\u2003\u200367 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20071 suites\u2003\u200312 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCopcxBCVuRDELVMa7u/hB7HbmZWfnGszoeMeqgnEXwCcYAkoPdiUsCWnwcRLtC7' '0LSpFpxGdm2OJ7nYyWYPJCPyJafAyGNSXj/SveIgteIutdnOeUXRbwBM/F3CT5cQKN/0L' diff --git a/python/test/files/trx/xunit/dotnet-trx.annotations b/python/test/files/trx/xunit/dotnet-trx.annotations index 21aa04b0..72d086c9 100644 --- a/python/test/files/trx/xunit/dotnet-trx.annotations +++ b/python/test/files/trx/xunit/dotnet-trx.annotations @@ -7,9 +7,9 @@ 'output': { 'title': '5 fail, 1 skipped, 5 pass in 0s', 'summary': - '11 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20030s ' - ':stopwatch:\n\u205f\u20041 suites\u2003\u20031 :zzz:\n\u205f\u20041 ' - 'files\u2004\u2002\u2003\u20035 :x:\n\nResults for commit commit s.\n\n' + '11 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20071 suites\u2003\u20031 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003' + '5 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLaSw8TKGIMaNfMwClfHuroCK3byZ3XfwBYwOfGSiYzwkiC/MCWUE7wh7QhpinsQDU0' 'hKUTN8xQZ7/S7FIsH8LjSix2rE5F7hnVtf4U+XubFlbmXKWwuRoCYWVsnPC2b3Tg/fAAA' diff --git a/python/test/files/trx/xunit/xUnit-net461-sample.annotations b/python/test/files/trx/xunit/xUnit-net461-sample.annotations index 1457b87b..daed1553 100644 --- a/python/test/files/trx/xunit/xUnit-net461-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-net461-sample.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' diff --git 
a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations index 55ec284d..62c2df45 100644 --- a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 :heavy_check_mark:\u2003\u20030s ' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' diff --git a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations index 406cbb18..46f8d041 100644 --- a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations +++ b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations @@ -7,10 +7,9 @@ 'output': { 'title': '2 fail, 21 skipped, 2 pass in 26s', 'summary': - '25 tests\u2002\u2003\u2003\u205f\u20042 :heavy_check_mark:\u2003\u2003' - '26s :stopwatch:\n\u205f\u20041 suites\u2003\u200321 :zzz:\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20042 :x:\n\nResults for ' - 'commit commit s.\n\n' + '25 tests\u2002\u2003\u2003\u20072 :white_check_mark:\u2003\u200326s ' + ':stopwatch:\n\u20071 suites\u2003\u200321 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '2 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLcRECy9jCErciGAWqIx3d/GD2O3M28zONZjJ856JinEfIWQYI8oAzhI2HTEtIW1N+8' 'Lgo1LJfGKBLQmRjZZgfi8TokMyNRmMNhfTXQZvLnqXKHIXlzXl1hUCwXMxP0t+nB5bCu/' diff --git a/python/test/files/xunit/mstest/fixie.annotations b/python/test/files/xunit/mstest/fixie.annotations index d88fe0fd..eb6a0589 100644 --- a/python/test/files/xunit/mstest/fixie.annotations +++ b/python/test/files/xunit/mstest/fixie.annotations @@ -8,8 +8,8 @@ 'title': '3 fail, 1 skipped, 1 pass in 8s', 'summary': '1 files\u2004\u20032 suites\u2004\u2003\u20028s :stopwatch:\n5 tests\u2003' - '1 :heavy_check_mark:\u20031 :zzz:\u20033 :x:\n7 runs\u2006\u20033 ' - ':heavy_check_mark:\u20031 :zzz:\u20033 :x:\n\nResults for commit ' + '1 :white_check_mark:\u20031 :zzz:\u20033 :x:\n7 runs\u200a\u20033 ' + ':white_check_mark:\u20031 :zzz:\u20033 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QSRZcJsaKh0hh1Fd29Scxs998b5p18hm2yvGdVwbj14ALUBKNH6cBowo6QDu45Ne' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations index 7c07f411..00b40d97 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20030s ' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations index 201d293b..449c69a4 100644 --- 
a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations @@ -8,8 +8,8 @@ 'title': 'All 1 tests pass in 0s', 'summary': '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n1 tests\u2003' - '1 :heavy_check_mark:\u20030 :zzz:\u20030 :x:\n3 runs\u2006\u20031 ' - ':heavy_check_mark:\u20031 :zzz:\u20031 :x:\n\nResults for commit ' + '1 :white_check_mark:\u20030 :zzz:\u20030 :x:\n3 runs\u200a\u20031 ' + ':white_check_mark:\u20031 :zzz:\u20031 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XoYQhLiRj1mWynh3UZBgN29m8k5uwOrIFzYNjMcE1GBNKAmCzzhmzAN905tFTE' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations index 90cdc84a..27b9ab3d 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 4m 48s', 'summary': - '3 tests\u2002\u2003\u20031 :heavy_check_mark:\u2003\u20034m 48s ' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20034m 48s ' ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations index c580ccc2..8ce031e6 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations index 61c20690..806cceb4 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations @@ -7,7 +7,7 @@ 'output': { 'title': 'All 5 tests pass in 1m 32s', 'summary': - '5 tests\u2002\u2003\u20035 :heavy_check_mark:\u2003\u20031m 32s ' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20031m 32s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations index 11da6003..f0aa4cd5 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations @@ -7,7 +7,7 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 :heavy_check_mark:\u2003\u20030s ' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' '1 :x:\n\nResults for commit commit s.\n\n' 
'[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' diff --git a/python/test/files/xunit/mstest/pickles.annotations b/python/test/files/xunit/mstest/pickles.annotations index 04cdcc69..caf7ff2a 100644 --- a/python/test/files/xunit/mstest/pickles.annotations +++ b/python/test/files/xunit/mstest/pickles.annotations @@ -8,8 +8,8 @@ 'title': '1 fail, 2 pass in 0s', 'summary': '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n3 tests\u2003' - '2 :heavy_check_mark:\u20030 :zzz:\u20031 :x:\n4 runs\u2006\u20033 ' - ':heavy_check_mark:\u20030 :zzz:\u20031 :x:\n\nResults for commit ' + '2 :white_check_mark:\u20030 :zzz:\u20031 :x:\n4 runs\u200a\u20033 ' + ':white_check_mark:\u20030 :zzz:\u20031 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLVSsvIwhqHEjH7NAZby7qwLSzZuZvJOvoBfPR9Y1jPsIocAcUQZwlrAlpCE8k8h58l' diff --git a/python/test/test_publish.py b/python/test/test_publish.py index 58dc97e1..294d90f7 100644 --- a/python/test/test_publish.py +++ b/python/test/test_publish.py @@ -7,7 +7,7 @@ from publish import Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ get_json_path, get_error_annotation, get_digest_from_stats, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ - duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, \ + duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, digit_space, \ get_magnitude, get_delta, as_short_commit, as_delta, as_stat_number, as_stat_duration, get_stats_from_digest, \ digest_string, ungest_string, get_details_line_md, get_commit_line_md, restrict_unicode, \ get_short_summary, get_short_summary_md, get_long_summary_md, get_long_summary_with_runs_md, \ @@ -420,42 +420,42 @@ def test_as_delta(self): self.assertEqual(as_delta(+1, 1), '+1') self.assertEqual(as_delta(-2, 1), ' - 2') - self.assertEqual(as_delta(0, 2), '±  0') - self.assertEqual(as_delta(+1, 2), '+  1') - self.assertEqual(as_delta(-2, 2), ' -   2') + self.assertEqual(as_delta(0, 2), f'±{digit_space}0') + self.assertEqual(as_delta(+1, 2), f'+{digit_space}1') + self.assertEqual(as_delta(-2, 2), f' - {digit_space}2') - self.assertEqual(as_delta(1, 5), '+       1') - self.assertEqual(as_delta(12, 5), '+     12') - self.assertEqual(as_delta(123, 5), '+   123') + self.assertEqual(as_delta(1, 5), f'+{digit_space} {digit_space}{digit_space}1') + self.assertEqual(as_delta(12, 5), f'+{digit_space} {digit_space}12') + self.assertEqual(as_delta(123, 5), f'+{digit_space} 123') self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 6), f'+{digit_space}{digit_space} 123') with temp_locale('en_US'): self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 6), f'+{digit_space}{digit_space} 123') with temp_locale('de_DE'): self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 
6), f'+{digit_space}{digit_space} 123') def test_as_stat_number(self): label = 'unit' self.assertEqual(as_stat_number(None, 1, 0, label), 'N/A unit') self.assertEqual(as_stat_number(1, 1, 0, label), '1 unit') - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') with temp_locale('en_US'): - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') with temp_locale('de_DE'): - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') self.assertEqual(as_stat_number(dict(number=1), 1, 0, label), '1 unit') @@ -463,16 +463,16 @@ def test_as_stat_number(self): self.assertEqual(as_stat_number(dict(number=1, delta=-1), 1, 1, label), '1 unit  - 1 ') self.assertEqual(as_stat_number(dict(number=2, delta=+0), 1, 1, label), '2 unit ±0 ') self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 1, label), '3 unit +1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 2, label), '3 unit +  1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1), 2, 2, label), '  3 unit +  1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 2, label), f'3 unit +{digit_space}1 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1), 2, 2, label), f'{digit_space}3 unit +{digit_space}1 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit +{digit_space}1 234 ') self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') with temp_locale('en_US'): - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit +{digit_space}1 234 ') self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') with temp_locale('de_DE'): - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit +{digit_space}1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), f'3 unit +12 345 ') self.assertEqual(as_stat_number(dict(delta=-1), 3, 1, label), 'N/A unit  - 1 ') @@ -555,7 +555,7 @@ def test_get_short_summary(self): def test_label_md(self): self.assertEqual(all_tests_label_md, 'tests') - self.assertEqual(passed_tests_label_md, f':heavy_check_mark:') + 
self.assertEqual(passed_tests_label_md, f':white_check_mark:') self.assertEqual(skipped_tests_label_md, f':zzz:') self.assertEqual(failed_tests_label_md, f':x:') self.assertEqual(test_errors_label_md, f':fire:') @@ -673,9 +673,9 @@ def test_get_long_summary_with_runs_md(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=0, runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=0, commit='commit' - )), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + )), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' f'\n' f'Results for commit commit.\n')) @@ -685,9 +685,9 @@ def test_get_long_summary_with_runs_md_with_errors(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8, runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13, commit='commit' - )), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' + )), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' f'\n' f'Results for commit commit.\n')) @@ -697,9 +697,9 @@ def test_get_long_summary_with_runs_md_with_deltas(self): tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9), runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14), commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef' - )), (f'1 files  +  2    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + )), (f'1 files  +{digit_space}2  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' f'\n' f'Results for commit 12345678. 
± Comparison against type commit 01234567.\n')) @@ -712,9 +712,9 @@ def test_get_long_summary_with_runs_md_with_details_url_with_fails(self): commit='commit' ), 'https://details.url/' - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' f'\n' f'For more details on these failures, see [this check](https://details.url/).\n' f'\n' @@ -730,9 +730,9 @@ def test_get_long_summary_with_runs_md_with_details_url_without_fails(self): commit='commit' ), 'https://details.url/' - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' f'\n' f'Results for commit commit.\n') ) @@ -750,9 +750,9 @@ def test_get_long_summary_with_runs_md_with_test_lists(self): ['test1', 'test2', 'test3', 'test4', 'test5'], ['test5', 'test6'], ['test2'], ['test5', 'test6'] ), - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1013,9 +1013,9 @@ def test_get_long_summary_with_digest_md_with_multiple_runs(self): ) ) - self.assertEqual(actual, f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1037,9 +1037,9 @@ def test_get_long_summary_with_digest_md_with_test_errors(self): ) ) - self.assertEqual(actual, f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 
{test_errors_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1061,9 +1061,9 @@ def test_get_long_summary_with_digest_md_with_parse_errors(self): ) ) - self.assertEqual(actual, f'1 files    1 errors    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}1 errors  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1090,9 +1090,9 @@ def test_get_long_summary_with_digest_md_with_delta(self): ) ) - self.assertEqual(actual, f'1 files  +  2    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + self.assertEqual(actual, f'1 files  +{digit_space}2  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' '\n' 'Results for commit 12345678. 
± Comparison against type commit 01234567.\n' '\n' @@ -1119,9 +1119,9 @@ def test_get_long_summary_with_digest_md_with_delta_and_parse_errors(self): ) ) - self.assertEqual(actual, f'1 files  +  2    1 errors    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + self.assertEqual(actual, f'1 files  +{digit_space}2  {digit_space}1 errors  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' '\n' 'Results for commit 12345678. ± Comparison against type commit 01234567.\n' '\n' @@ -2079,9 +2079,9 @@ def test_files(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'  10 files    10 suites   39m 1s {duration_label_md}\n' - f'217 {all_tests_label_md} 208 {passed_tests_label_md}   9 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'373 runs  333 {passed_tests_label_md} 40 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + self.assertEqual(md, (f'{digit_space}10 files  {digit_space}10 suites   39m 1s {duration_label_md}\n' + f'217 {all_tests_label_md} 208 {passed_tests_label_md} {digit_space}9 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'373 runs  333 {passed_tests_label_md} 40 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' f'\n' f'Results for commit example.\n')) @@ -2150,8 +2150,8 @@ def test_files_without_annotations(self): stats = get_stats(results) md = get_long_summary_md(stats) self.assertEqual(md, (f'373 {all_tests_label_md}   333 {passed_tests_label_md}  39m 1s {duration_label_md}\n' - f'  10 suites    40 {skipped_tests_label_md}\n' - f'  10 files        0 {failed_tests_label_md}\n' + f'{digit_space}10 suites  {digit_space}40 {skipped_tests_label_md}\n' + f'{digit_space}10 files    {digit_space}{digit_space}0 {failed_tests_label_md}\n' f'\n' f'Results for commit example.\n')) diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index e4db3392..71ca1c32 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -19,7 +19,7 @@ get_error_annotation, digest_header, get_digest_from_stats, \ all_tests_list, skipped_tests_list, none_annotations, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ - duration_label_md, pull_request_build_mode_merge, punctuation_space, \ + duration_label_md, digit_space, pull_request_build_mode_merge, punctuation_space, \ get_long_summary_with_digest_md from publish.github_action import GithubAction from publish.publisher import Publisher, Settings, PublishData @@ -670,15 +670,15 @@ def test_publish_comment_compare_earlier_with_restricted_unicode(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('create_issue_comment', method) self.assertEqual(('## title\n' - '\u205f\u20041 files\u2004 
±\u205f\u20040\u2002\u2003' + f'{digit_space}1 files\u2004 ±{digit_space}0\u2002\u2003' '2 suites\u2004 +1\u2002\u2003\u2002' f'3s :stopwatch: +2s\n' '22 tests +19\u2002\u2003' - f'4 :heavy_check_mark: +3\u2002\u2003' - f'5 :zzz: +3\u2002\u2003\u205f\u2004' - f'6 :x: +\u205f\u20046\u2002\u2003\u205f\u2004' - f'7 :fire: +\u205f\u20047\u2002\n' - f'38 runs\u2006 +35\u2002\u20038 :heavy_check_mark: +7\u2002\u2003' + f'4 :white_check_mark: +3\u2002\u2003' + f'5 :zzz: +3\u2002\u2003{digit_space}' + f'6 :x: +{digit_space}6\u2002\u2003{digit_space}' + f'7 :fire: +{digit_space}7\u2002\n' + f'38 runs\u200a +35\u2002\u20038 :white_check_mark: +7\u2002\u2003' f'9 :zzz: +7\u2002\u2003' f'10 :x: +10\u2002\u2003' f'11 :fire: +11\u2002\n' @@ -1304,9 +1304,9 @@ def do_test_publish_check_without_base_stats(self, errors: List[ParseError], ann output={ 'title': '{}7 errors, 6 fail, 5 skipped, 4 pass in 3s' .format('{} parse errors, '.format(len(errors)) if len(errors) > 0 else ''), - 'summary': f'\u205f\u20041 files\u2004\u2003{{errors}}2 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + 'summary': f'{digit_space}1 files\u2004\u2003{{errors}}2 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1334,7 +1334,7 @@ def do_test_publish_check_without_base_stats(self, errors: List[ParseError], ann 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  {summary_errors}2 suites   3s :stopwatch:\\n22 tests 4 :heavy_check_mark: 5 :zzz:   6 :x:   7 :fire:\\n38 runs  8 :heavy_check_mark: 9 :zzz: 10 :x: 11 :fire:\\n\\nResults for commit commit.\\n", ' + f'"summary": "{digit_space}1 files  {summary_errors}2 suites   3s :stopwatch:\\n22 tests 4 :white_check_mark: 5 :zzz: {digit_space}6 :x: {digit_space}7 :fire:\\n38 runs  8 :white_check_mark: 9 :zzz: 10 :x: 11 :fire:\\n\\nResults for commit commit.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' f'"annotations": {len(annotations)}, ' @@ -1371,9 +1371,9 @@ def do_test_publish_check_with_base_stats(self, errors: List[ParseError]): output={ 'title': '{}7 errors, 6 fail, 5 skipped, 4 pass in 3s' .format('{} parse errors, '.format(len(errors)) if len(errors) > 0 else ''), - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u2003{{errors}}2 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 
+1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u2003{{errors}}2 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1406,7 +1406,7 @@ def do_test_publish_check_with_base_stats(self, errors: List[ParseError]): 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  ±0  {summary_errors}2 suites  ±0   3s :stopwatch: ±0s\\n22 tests +1  4 :heavy_check_mark:  -   8  5 :zzz: +1    6 :x: +4    7 :fire: +  4 \\n38 runs  +1  8 :heavy_check_mark:  - 17  9 :zzz: +2  10 :x: +6  11 :fire: +10 \\n\\nResults for commit commit. ± Comparison against earlier commit past.\\n", ' + f'"summary": "{digit_space}1 files  ±0  {summary_errors}2 suites  ±0   3s :stopwatch: ±0s\\n22 tests +1  4 :white_check_mark:  - {digit_space}8  5 :zzz: +1  {digit_space}6 :x: +4  {digit_space}7 :fire: +{digit_space}4 \\n38 runs  +1  8 :white_check_mark:  - 17  9 :zzz: +2  10 :x: +6  11 :fire: +10 \\n\\nResults for commit commit. ± Comparison against earlier commit past.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' '"stats_with_delta": {"files": {"number": 1, "delta": 0}, ' + f'"errors": {len(errors)}, ' + '"suites": {"number": 2, "delta": 0}, "duration": {"duration": 3, "delta": 0}, "tests": {"number": 22, "delta": 1}, "tests_succ": {"number": 4, "delta": -8}, "tests_skip": {"number": 5, "delta": 1}, "tests_fail": {"number": 6, "delta": 4}, "tests_error": {"number": 7, "delta": 4}, "runs": {"number": 38, "delta": 1}, "runs_succ": {"number": 8, "delta": -17}, "runs_skip": {"number": 9, "delta": 2}, "runs_fail": {"number": 10, "delta": 6}, "runs_error": {"number": 11, "delta": 10}, "commit": "commit", "reference_type": "earlier", "reference_commit": "past"}, ' @@ -1437,9 +1437,9 @@ def test_publish_check_without_compare(self): conclusion='conclusion', output={ 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + 'summary': f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 
{failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1499,9 +1499,9 @@ def test_publish_check_with_multiple_annotation_pages(self): conclusion='conclusion', output={ 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1532,9 +1532,9 @@ def test_publish_check_with_multiple_annotation_pages(self): outputs = [ { 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1660,9 +1660,9 @@ def test_publish_check_with_suite_details(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' - f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' - f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 
:x:\u20036 :fire:\n' + 'summary': f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1696,9 +1696,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s :stopwatch:\\n' - f'7 tests   1 :heavy_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' - f'3 runs  -12 :heavy_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' + f'1 files  {digit_space}1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests {digit_space}1 :white_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :white_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1732,9 +1732,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' - f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' - f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' + f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1783,9 +1783,9 @@ def test_publish_check_with_cases(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' - f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' - f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' + 'summary': f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1820,9 +1820,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s :stopwatch:\\n' - f'7 tests   1 :heavy_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' - f'3 runs  -12 :heavy_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' + f'1 files  {digit_space}1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests {digit_space}1 :white_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :white_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1906,9 +1906,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' - f'7 tests\u2003\u205f\u20041 :heavy_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' - f'3 runs\u2006\u2003-12 :heavy_check_mark:\u20034 
:zzz:\u20035 :x:\u20036 :fire:\\n' + f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -2250,9 +2250,9 @@ def test_publish_job_summary_without_before(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s :stopwatch:\n' - f'22 tests\u20034 :heavy_check_mark:\u20035 :zzz:\u2003\u205f\u20046 :x:\u2003\u205f\u20047 :fire:\n' - f'38 runs\u2006\u20038 :heavy_check_mark:\u20039 :zzz:\u200310 :x:\u200311 :fire:\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s :stopwatch:\n' + f'22 tests\u20034 :white_check_mark:\u20035 :zzz:\u2003{digit_space}6 :x:\u2003{digit_space}7 :fire:\n' + f'38 runs\u200a\u20038 :white_check_mark:\u20039 :zzz:\u200310 :x:\u200311 :fire:\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2281,9 +2281,9 @@ def test_publish_job_summary_with_before(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s :stopwatch: -1s\n' - f'22 tests +2\u2002\u20034 :heavy_check_mark: \u2006-\u200a1\u2002\u20035 :zzz: +1\u2002\u2003\u205f\u20046 :x: +1\u2002\u2003\u205f\u20047 :fire: +1\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 :heavy_check_mark: \u2006-\u200a2\u2002\u20039 :zzz: ±0\u2002\u200310 :x: +2\u2002\u200311 :fire: +4\u2002\n' + f'{digit_space}1 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s :stopwatch: -1s\n' + f'22 tests +2\u2002\u20034 :white_check_mark: \u2006-\u200a1\u2002\u20035 :zzz: +1\u2002\u2003{digit_space}6 :x: +1\u2002\u2003{digit_space}7 :fire: +1\u2002\n' + f'38 runs\u200a +1\u2002\u20038 :white_check_mark: \u2006-\u200a2\u2002\u20039 :zzz: ±0\u2002\u200310 :x: +2\u2002\u200311 :fire: +4\u2002\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2306,9 +2306,9 @@ def test_publish_comment(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 
{skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against base commit base.\n' '\n' @@ -2349,9 +2349,9 @@ def test_publish_comment_without_base(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -2374,9 +2374,9 @@ def test_publish_comment_without_compare(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -2400,9 +2400,9 @@ def test_publish_comment_with_check_run_with_annotations(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'For more details 
on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2432,9 +2432,9 @@ def test_publish_comment_with_check_run_without_annotations(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u20030 {failed_tests_label_md} \u2006-\u200a2\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u20030 {failed_tests_label_md} \u2006-\u200a4\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u20030 {failed_tests_label_md} \u2006-\u200a2\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u20030 {failed_tests_label_md} \u2006-\u200a4\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against base commit base.\n' '\n' From dd656270a0e3b61114602ace08af7959bcf9ed32 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Mon, 18 Dec 2023 17:55:48 +0100 Subject: [PATCH 23/28] Fix failing on no files (#543) * Fix failing on no files (Inconclusive conclusion is actually called neutral) * Log inconclusive when conclusion is neutral --- python/publish_test_results.py | 5 +++-- python/test/test_action_script.py | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/python/publish_test_results.py b/python/publish_test_results.py index e1ab1c5b..fd81a9a5 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -227,7 +227,7 @@ def log_parse_errors(errors: List[ParseError], gha: GithubAction): def action_fail_required(conclusion: str, action_fail: bool, action_fail_on_inconclusive: bool) -> bool: return action_fail and conclusion == 'failure' or \ - action_fail_on_inconclusive and conclusion == 'inconclusive' + action_fail_on_inconclusive and conclusion == 'neutral' def main(settings: Settings, gha: GithubAction) -> None: @@ -269,7 +269,8 @@ def main(settings: Settings, gha: GithubAction) -> None: Publisher(settings, gh, gha).publish(stats, results.case_results, conclusion) if action_fail_required(conclusion, settings.action_fail, settings.action_fail_on_inconclusive): - gha.error(f'This action finished successfully, but test results have status {conclusion}.') + status = f"{conclusion} / inconclusive" if conclusion == "neutral" else conclusion + gha.error(f'This action finished successfully, but test results have status {status}.') sys.exit(1) diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index dae244d9..c3b79961 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -1300,12 +1300,12 @@ def test_deprecate_val(self): def test_action_fail(self): for action_fail, action_fail_on_inconclusive, expecteds in [ - (False, False, [False] * 3), - (False, True, [True, False, False]), - (True, False, [False, False, True]), - (True, True, [True, False, True]), + (False, False, [False] * 4), + (False, True, [True, False, False, False]), + (True, False, [False, False, True, False]), + (True, True, [True, False, True, False]), ]: - for expected, 
conclusion in zip(expecteds, ['inconclusive', 'success', 'failure']): + for expected, conclusion in zip(expecteds, ['neutral', 'success', 'failure', 'unknown']): with self.subTest(action_fail=action_fail, action_fail_on_inconclusive=action_fail_on_inconclusive, conclusion=conclusion): actual = action_fail_required(conclusion, action_fail, action_fail_on_inconclusive) self.assertEqual(expected, actual) From d764099b3a7539319715f1c2b632f920e83e2184 Mon Sep 17 00:00:00 2001 From: Jay Turner Date: Tue, 19 Dec 2023 13:57:57 +0000 Subject: [PATCH 24/28] Add option to disable status check (#532) Co-authored-by: Enrico Minack --- .github/workflows/publish.yml | 1 + README.md | 11 ++++++++++ action.yml | 4 ++++ composite/action.yml | 5 +++++ python/publish/publisher.py | 34 ++++++++++++++++--------------- python/publish_test_results.py | 2 ++ python/test/test_action_script.py | 11 ++++++++++ python/test/test_publisher.py | 18 ++++++++++++++++ 8 files changed, 70 insertions(+), 16 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 530b68c3..219c60d0 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -125,6 +125,7 @@ jobs: -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ -e "INPUT_LARGE_FILES" \ -e "INPUT_IGNORE_RUNS" \ + -e "INPUT_CHECK_RUN" \ -e "INPUT_JOB_SUMMARY" \ -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ -e "INPUT_PULL_REQUEST_BUILD" \ diff --git a/README.md b/README.md index 5cfb54b2..2f6cb4dc 100644 --- a/README.md +++ b/README.md @@ -185,6 +185,8 @@ Those are highlighted in pull request comments to easily spot unintended test re ***Note:** This requires `check_run_annotations` to be set to `all tests, skipped tests`.* +Comments can be disabled with `comment_mode: off`. + ### Commit and pull request checks The checks section of a commit and related pull requests list a short summary (here `1 fail, 1 skipped, …`), @@ -198,6 +200,8 @@ Pull request checks: ![pull request checks example](misc/github-pull-request-checks.png) +Check runs can be disabled with `check_run: false`. + ### Commit and pull request annotations Each failing test produces an annotation with failure details in the checks section of a commit: @@ -213,6 +217,8 @@ Use option `test_file_prefix` to add a prefix to, or remove a prefix from these ***Note:** Only the first failure of a test is shown. If you want to see all failures, set `report_individual_runs: "true"`.* +Check run annotations can be disabled with `ignore_runs: true`. + ### GitHub Actions job summary The results are added to the job summary page of the workflow that runs this action: @@ -223,12 +229,16 @@ In presence of failures or errors, the job summary links to the respective [chec ***Note:** Job summary requires [GitHub Actions runner v2.288.0](https://github.com/actions/runner/releases/tag/v2.288.0) or above.* +Job summaries can be disabled with `job_summary: false`. + ### GitHub Actions check summary of a commit Test results are published in the GitHub Actions check summary of the respective commit: ![checks comment example](misc/github-checks-comment.png) +Check runs can be disabled with `check_run: false`. + ## The symbols [comment]: <> (This heading is linked to from method get_link_and_tooltip_label_md) @@ -295,6 +305,7 @@ The list of most notable options: |:-----|:-----:|:----------| |`time_unit`|`seconds`|Time values in the test result files have this unit. 
Supports `seconds` and `milliseconds`.| |`test_file_prefix`|`none`|Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or removed from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".| +|`check_run`|`true`|Set to `true`, the results are published as a check run, but it may not be associated with the workflow that ran this action.| |`job_summary`|`true`|Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| |`compare_to_earlier_commit`|`true`|Test results are compared to results of earlier commits to show changes:
`false` - disable comparison, `true` - compare across commits.'| |`test_changes_limit`|`10`|Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of `0`.| diff --git a/action.yml b/action.yml index 135a555c..37996d55 100644 --- a/action.yml +++ b/action.yml @@ -80,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' diff --git a/composite/action.yml b/composite/action.yml index 0db7e50c..26b5eedb 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -80,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' @@ -293,6 +297,7 @@ runs: JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }} JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} + CHECK_RUN: ${{ inputs.check_run }} JOB_SUMMARY: ${{ inputs.job_summary }} SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }} # not documented diff --git a/python/publish/publisher.py b/python/publish/publisher.py index 16869d79..2b5ec7c8 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -59,6 +59,7 @@ class Settings: check_name: str comment_title: str comment_mode: str + check_run: bool job_summary: bool compare_earlier: bool pull_request_build: str @@ -196,21 +197,22 @@ def publish(self, if logger.isEnabledFor(logging.DEBUG): logger.debug(f'Publishing {stats}') - if self._settings.is_fork: - # running on a fork, we cannot publish the check, but we can still read before_check_run - # bump the version if you change the target of this link (if it did not exist already) or change the section - logger.info('This action is running on a pull_request event for a fork repository. ' - 'Pull request comments and check runs cannot be created, so disabling these features. 
' - 'To fully run the action on fork repository pull requests, see ' - f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') - check_run = None - before_check_run = None - if self._settings.compare_earlier: - before_commit_sha = get_json_path(self._settings.event, 'before') - logger.debug(f'comparing against before={before_commit_sha}') - before_check_run = self.get_check_run(before_commit_sha) - else: - check_run, before_check_run = self.publish_check(stats, cases, conclusion) + check_run = None + before_check_run = None + if self._settings.check_run: + if self._settings.is_fork: + # running on a fork, we cannot publish the check, but we can still read before_check_run + # bump the version if you change the target of this link (if it did not exist already) or change the section + logger.info('This action is running on a pull_request event for a fork repository. ' + 'Pull request comments and check runs cannot be created, so disabling these features. ' + 'To fully run the action on fork repository pull requests, see ' + f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') + if self._settings.compare_earlier: + before_commit_sha = get_json_path(self._settings.event, 'before') + logger.debug(f'comparing against before={before_commit_sha}') + before_check_run = self.get_check_run(before_commit_sha) + else: + check_run, before_check_run = self.publish_check(stats, cases, conclusion) if self._settings.job_summary: self.publish_job_summary(self._settings.comment_title, stats, check_run, before_check_run) @@ -468,7 +470,7 @@ def publish_json(self, data: PublishData): def publish_job_summary(self, title: str, stats: UnitTestRunResults, - check_run: CheckRun, + check_run: Optional[CheckRun], before_check_run: Optional[CheckRun]): before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats diff --git a/python/publish_test_results.py b/python/publish_test_results.py index fd81a9a5..9bf3dd11 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -397,6 +397,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: event = json.load(f) repo = get_var('GITHUB_REPOSITORY', options) + check_run = get_bool_var('CHECK_RUN', options, default=True) job_summary = get_bool_var('JOB_SUMMARY', options, default=True) comment_mode = get_var('COMMENT_MODE', options) or comment_mode_always @@ -475,6 +476,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: check_name=check_name, comment_title=get_var('COMMENT_TITLE', options) or check_name, comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=get_bool_var('COMPARE_TO_EARLIER_COMMIT', options, default=True), pull_request_build=get_var('PULL_REQUEST_BUILD', options) or 'merge', diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index c3b79961..26992c0c 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -184,6 +184,7 @@ def get_settings(token='token', check_name='check name', comment_title='title', comment_mode=comment_mode_always, + check_run=True, job_summary=True, compare_earlier=True, test_changes_limit=10, @@ -233,6 +234,7 @@ def get_settings(token='token', check_name=check_name, 
comment_title=comment_title, comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=compare_earlier, pull_request_build=pull_request_build, @@ -456,6 +458,15 @@ def test_get_settings_compare_to_earlier_commit(self): self.do_test_get_settings(COMPARE_TO_EARLIER_COMMIT='foo', expected=self.get_settings(compare_earlier=True), warning=warning, exception=RuntimeError) self.do_test_get_settings(COMPARE_TO_EARLIER_COMMIT=None, expected=self.get_settings(compare_earlier=True)) + def test_get_settings_check_run(self): + warning = 'Option check_run has to be boolean, so either "true" or "false": foo' + self.do_test_get_settings(CHECK_RUN='false', expected=self.get_settings(check_run=False)) + self.do_test_get_settings(CHECK_RUN='False', expected=self.get_settings(check_run=False)) + self.do_test_get_settings(CHECK_RUN='true', expected=self.get_settings(check_run=True)) + self.do_test_get_settings(CHECK_RUN='True', expected=self.get_settings(check_run=True)) + self.do_test_get_settings(CHECK_RUN='foo', expected=self.get_settings(check_run=True), warning=warning, exception=RuntimeError) + self.do_test_get_settings(CHECK_RUN=None, expected=self.get_settings(check_run=True)) + def test_get_settings_job_summary(self): warning = 'Option job_summary has to be boolean, so either "true" or "false": foo' self.do_test_get_settings(JOB_SUMMARY='false', expected=self.get_settings(job_summary=False)) diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index 71ca1c32..c403a1bd 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -79,6 +79,7 @@ def create_github_pr(repo: str, @staticmethod def create_settings(actor='actor', comment_mode=comment_mode_always, + check_run=True, job_summary=True, compare_earlier=True, report_individual_runs=False, @@ -124,6 +125,7 @@ def create_settings(actor='actor', check_name='Check Name', comment_title='Comment Title', comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=compare_earlier, pull_request_build=pull_request_build, @@ -491,6 +493,22 @@ def test_publish_without_job_summary_and_comment(self): self.assertEqual((self.stats, self.cases, 'success'), args) self.assertEqual({}, kwargs) + def test_publish_without_job_summary_and_comment_on_fork(self): + settings = self.create_settings(is_fork=True, comment_mode=comment_mode_off, job_summary=False) + mock_calls = self.call_mocked_publish(settings, prs=[object()]) + + self.assertEqual(1, len(mock_calls)) + (method, args, kwargs) = mock_calls[0] + self.assertEqual('get_check_run', method) + self.assertEqual(('before', ), args) + self.assertEqual({}, kwargs) + + def test_publish_without_check_run_job_summary_and_comment(self): + settings = self.create_settings(comment_mode=comment_mode_off, job_summary=False, check_run=False) + mock_calls = self.call_mocked_publish(settings, prs=[object()]) + + self.assertEqual(0, len(mock_calls)) + def test_publish_with_comment_without_pr(self): settings = self.create_settings() mock_calls = self.call_mocked_publish(settings, prs=[]) From bea86167e24a0f5646b8faa2eb546785a99a1fe9 Mon Sep 17 00:00:00 2001 From: Adrian Dischinger <72736591+AdrianDsg@users.noreply.github.com> Date: Fri, 22 Dec 2023 13:57:14 +0100 Subject: [PATCH 25/28] Upgrade CI and Python dependencies (#523) Co-authored-by: Enrico Minack --- .github/actions/test/action.yml | 2 +- .github/workflows/badges.yml | 4 ++-- .github/workflows/ci-cd.yml | 14 +++++++------- .github/workflows/codeql.yml | 8 ++++---- 
.github/workflows/publish.yml | 20 ++++++++++---------- .github/workflows/test-os.yml | 2 +- composite/action.yml | 2 +- python/requirements-direct.txt | 2 +- python/requirements.txt | 18 +++++++++++------- 9 files changed, 38 insertions(+), 34 deletions(-) diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml index a07fbff1..606c9a10 100644 --- a/.github/actions/test/action.yml +++ b/.github/actions/test/action.yml @@ -27,7 +27,7 @@ runs: python-version: ${{ inputs.python-version }} - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Detect OS id: os diff --git a/.github/workflows/badges.yml b/.github/workflows/badges.yml index 542f2e2d..0bcfa397 100644 --- a/.github/workflows/badges.yml +++ b/.github/workflows/badges.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get package downloads id: downloads @@ -58,7 +58,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Fetch workflows id: workflows diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index a24080ad..ec584696 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check requirements.txt against requirements-direct.txt run: | (diff -w python/requirements-direct.txt python/requirements.txt || true) | (! grep -e "^<") @@ -75,7 +75,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Extract action image and version # we deploy from a specific commit on master (the one that mentions a new version the first time) @@ -109,7 +109,7 @@ jobs: steps: - name: Docker meta id: docker-meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: ghcr.io/EnricoMi/publish-unit-test-result-action flavor: | @@ -123,20 +123,20 @@ jobs: type=semver,pattern={{version}},value=${{ needs.config-deploy.outputs.image-version }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.CR_PAT }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: tags: ${{ steps.docker-meta.outputs.tags }} labels: ${{ steps.docker-meta.outputs.labels }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3be9041d..0f994b24 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,11 +31,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -46,7 +46,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -60,4 +60,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 219c60d0..feaae1ea 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download Artifacts uses: actions/download-artifact@v3 @@ -57,7 +57,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -66,11 +66,11 @@ jobs: platforms: ${{ matrix.arch }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build Docker image id: build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: load: true push: false @@ -199,7 +199,7 @@ jobs: - name: Scan for vulnerabilities id: scan - uses: crazy-max/ghaction-container-scan@v2 + uses: crazy-max/ghaction-container-scan@v3 with: image: enricomi/publish-unit-test-result-action:latest dockerfile: ./Dockerfile @@ -211,7 +211,7 @@ jobs: path: ${{ steps.scan.outputs.sarif }} - name: Upload SARIF file if: always() && steps.scan.outputs.sarif != '' - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: ${{ steps.scan.outputs.sarif }} @@ -258,11 +258,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python if: matrix.python != 'installed' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} @@ -299,7 +299,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Copy test result files run: cp -rv python/test/files test-files @@ -345,7 +345,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Copy test junit xml files run: cp -rv python/test/files/junit-xml test-files diff --git a/.github/workflows/test-os.yml b/.github/workflows/test-os.yml index 9f90a0c0..28482f2e 100644 --- a/.github/workflows/test-os.yml +++ b/.github/workflows/test-os.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Test uses: ./.github/actions/test diff --git a/composite/action.yml b/composite/action.yml index 26b5eedb..f9651c11 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -201,7 +201,7 @@ runs: continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-df386fe4e04a72c96e140f0566a5c849 + key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-fc884bb0b8d89fb24ccb9a84a3d97821 - name: Create virtualenv id: venv diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt index 2b17f6ce..dfc0768a 100644 --- a/python/requirements-direct.txt +++ b/python/requirements-direct.txt @@ -1,5 +1,5 @@ humanize==3.14.0 junitparser==3.1.0 lxml==4.9.3 -psutil==5.9.5 +psutil==5.9.7 PyGithub==2.1.1 
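The direct dependency pins above are mirrored in python/requirements.txt below, which appears to also pin the transitive packages (shown indented). The "Check requirements.txt against requirements-direct.txt" step in .github/workflows/ci-cd.yml enforces that relationship with a whitespace-insensitive diff; a minimal sketch of that check, assuming it runs from the repository root:

    # succeeds only if requirements.txt contains every line of requirements-direct.txt (ignoring whitespace);
    # diff marks lines present only in requirements-direct.txt with "<", so any such line fails the step
    (diff -w python/requirements-direct.txt python/requirements.txt || true) | (! grep -e "^<")
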
diff --git a/python/requirements.txt b/python/requirements.txt index 0904d348..76b9b240 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -2,20 +2,24 @@ humanize==3.14.0 junitparser==3.1.0 future==0.18.3 lxml==4.9.3 -psutil==5.9.5 +psutil==5.9.7 PyGithub==2.1.1 Deprecated==1.2.14 - wrapt==1.15.0 + wrapt==1.16.0 PyJWT==2.8.0 PyNaCl==1.5.0 + # latest version that support Python 3.7 cffi==1.15.1 pycparser==2.21 python-dateutil==2.8.2 six==1.16.0 requests==2.31.0 - certifi==2023.7.22 - charset-normalizer==3.3.0 - idna==3.4 - urllib3==2.0.6 + certifi==2023.11.17 + charset-normalizer==3.3.2 + idna==3.6 + # latest version that support Python 3.7 + urllib3==2.0.7 + # latest version that support Python 3.7 typing_extensions==4.7.1 - urllib3==2.0.6 + # latest version that support Python 3.7 + urllib3==2.0.7 From e780361cd1fc1b1a170624547b3ffda64787d365 Mon Sep 17 00:00:00 2001 From: Enrico Minack Date: Fri, 22 Dec 2023 16:19:33 +0100 Subject: [PATCH 26/28] Releasing v2.12.0 --- action.yml | 2 +- python/publish/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/action.yml b/action.yml index 37996d55..fa2f717c 100644 --- a/action.yml +++ b/action.yml @@ -148,7 +148,7 @@ outputs: runs: using: 'docker' - image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.11.0' + image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.12.0' branding: icon: 'check-circle' diff --git a/python/publish/__init__.py b/python/publish/__init__.py index a596b867..05e70d36 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -11,7 +11,7 @@ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError # keep the version in sync with action.yml -__version__ = 'v2.11.0' +__version__ = 'v2.12.0' logger = logging.getLogger('publish') digest_prefix = '[test-results]:data:' From 4a497bd0bb5f675d6b17ed1f7456341b30b148c4 Mon Sep 17 00:00:00 2001 From: Adam Hernandez Date: Fri, 22 Dec 2023 09:00:57 -0700 Subject: [PATCH 27/28] resync --- .github/actions/test/action.yml | 136 ++++ .github/workflows/badges.yml | 26 +- .github/workflows/ci-cd.yml | 585 +----------------- .github/workflows/codeql.yml | 8 +- .github/workflows/publish.yml | 376 +++++++++++ .github/workflows/test-os.yml | 31 + README.md | 105 +++- action.yml | 12 +- composite/action.yml | 61 +- misc/badge-arm.svg | 20 + misc/badge-dart.svg | 20 - misc/badge-js.svg | 20 + misc/badge-junit-xml.svg | 20 - misc/badge-mocha.svg | 20 - misc/badge-nunit-xml.svg | 20 - misc/badge-trx.svg | 24 +- misc/badge-xml.svg | 20 + misc/badge-xunit-xml.svg | 20 - python/publish/__init__.py | 36 +- python/publish/junit.py | 21 +- python/publish/publisher.py | 54 +- python/publish_test_results.py | 30 +- python/requirements-direct.txt | 8 +- python/requirements.txt | 22 +- python/test/files/dart/json/tests.annotations | 19 +- .../junit-xml/bazel/suite-logs.annotations | 18 +- .../junit-xml/jest/jest-junit.annotations | 16 +- .../junit-xml/junit.multiresult.annotations | 26 +- .../junit-xml/minimal-attributes.annotations | 18 +- .../mocha/latex-utensils.annotations | 23 +- .../files/junit-xml/no-attributes.annotations | 18 +- .../junit-xml/no-cases-but-tests.annotations | 16 +- .../test/files/junit-xml/no-cases.annotations | 16 +- .../files/junit-xml/non-junit.annotations | 16 +- .../junit-xml/pytest/junit.fail.annotations | 16 +- .../pytest/junit.gloo.elastic.annotations | 16 +- .../junit.gloo.elastic.spark.tf.annotations | 16 +- ...junit.gloo.elastic.spark.torch.annotations | 16 +- 
.../pytest/junit.gloo.standalone.annotations | 16 +- .../pytest/junit.gloo.static.annotations | 16 +- .../pytest/junit.mpi.integration.annotations | 16 +- .../pytest/junit.mpi.standalone.annotations | 16 +- .../pytest/junit.mpi.static.annotations | 16 +- .../junit.spark.integration.1.annotations | 16 +- .../junit.spark.integration.2.annotations | 16 +- ...ch.spark.diff.DiffOptionsSuite.annotations | 16 +- .../testsuite-in-testsuite.annotations | 16 +- .../junit-xml/testsuite-root.annotations | 16 +- .../files/junit-xml/tst/disabled.annotations | 27 +- .../junit-xml/unsupported-unicode.annotations | 18 +- .../junit-xml/with-xml-entities.annotations | 18 +- .../files/junit-xml/xunit/xunit.annotations | 16 +- python/test/files/mocha/tests.annotations | 18 +- .../nunit/mstest/clicketyclackety.annotations | 23 +- .../files/nunit/mstest/pickles.annotations | 16 +- .../nunit/mstest/timewarpinc.annotations | 16 +- .../nunit3/jenkins/NUnit-correct.annotations | 18 +- .../nunit3/jenkins/NUnit-correct2.annotations | 23 +- .../nunit3/jenkins/NUnit-correct3.annotations | 16 +- .../nunit3/jenkins/NUnit-failure.annotations | 16 +- .../jenkins/NUnit-healthReport.annotations | 16 +- .../nunit3/jenkins/NUnit-ignored.annotations | 16 +- .../jenkins/NUnit-issue1077.annotations | 16 +- .../jenkins/NUnit-issue33493.annotations | 19 +- .../jenkins/NUnit-issue44527.annotations | 72 +-- .../jenkins/NUnit-issue48478.annotations | 16 +- .../jenkins/NUnit-issue50162.annotations | 16 +- .../jenkins/NUnit-issue5674.annotations | 16 +- .../jenkins/NUnit-issue6353.annotations | 16 +- .../jenkins/NUnit-multinamespace.annotations | 16 +- .../jenkins/NUnit-sec1752-file.annotations | 16 +- .../jenkins/NUnit-sec1752-https.annotations | 16 +- .../nunit3/jenkins/NUnit-simple.annotations | 16 +- .../nunit/nunit3/jenkins/NUnit.annotations | 16 +- .../nunit3/jenkins/NUnitUnicode.annotations | 16 +- .../test/files/trx/mstest/pickles.annotations | 16 +- .../nunit/FluentValidation.Tests.annotations | 17 +- .../trx/nunit/NUnit-net461-sample.annotations | 16 +- .../NUnit-netcoreapp3.1-sample.annotations | 16 +- .../files/trx/nunit/SilentNotes.annotations | 16 +- .../files/trx/xunit/dotnet-trx.annotations | 16 +- .../trx/xunit/xUnit-net461-sample.annotations | 16 +- .../xUnit-netcoreapp3.1-sample.annotations | 16 +- ...mi_YAMILEX 2015-10-24 04_18_59.annotations | 16 +- .../test/files/xunit/mstest/fixie.annotations | 19 +- .../mstest/jenkinsci/testcase1.annotations | 16 +- .../mstest/jenkinsci/testcase2.annotations | 19 +- .../mstest/jenkinsci/testcase3.annotations | 16 +- .../mstest/jenkinsci/testcase4.annotations | 16 +- .../mstest/jenkinsci/testcase5.annotations | 16 +- .../mstest/jenkinsci/testcase6.annotations | 16 +- .../files/xunit/mstest/pickles.annotations | 19 +- python/test/requirements.txt | 1 - python/test/test_action_script.py | 97 ++- python/test/test_cicd_yml.py | 2 +- python/test/test_junit.py | 47 +- python/test/test_publish.py | 166 ++--- python/test/test_publisher.py | 184 +++--- 98 files changed, 1459 insertions(+), 1949 deletions(-) create mode 100644 .github/actions/test/action.yml create mode 100644 .github/workflows/publish.yml create mode 100644 .github/workflows/test-os.yml create mode 100644 misc/badge-arm.svg delete mode 100644 misc/badge-dart.svg create mode 100644 misc/badge-js.svg delete mode 100644 misc/badge-junit-xml.svg delete mode 100644 misc/badge-mocha.svg delete mode 100644 misc/badge-nunit-xml.svg create mode 100644 misc/badge-xml.svg delete mode 100644 misc/badge-xunit-xml.svg diff --git 
a/.github/actions/test/action.yml b/.github/actions/test/action.yml new file mode 100644 index 00000000..606c9a10 --- /dev/null +++ b/.github/actions/test/action.yml @@ -0,0 +1,136 @@ +name: 'Test' +author: 'EnricoMi' +description: 'A GitHub Action that tests this action' + +inputs: + os: + description: operating system, e.g. ubuntu-22.04 + required: true + python-version: + description: Python version, e.g. 3.11 + required: true + +runs: + using: 'composite' + steps: + - name: Setup Ubuntu + if: startsWith(inputs.os, 'ubuntu') + run: | + sudo apt-get update + sudo apt-get install language-pack-en language-pack-de + shell: bash + + - name: Setup Python + if: inputs.python-version != 'installed' + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Detect OS + id: os + env: + OS: ${{ inputs.os }} + run: | + case "$OS" in + ubuntu*) + echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT + ;; + macos*) + echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT + ;; + windows*) + echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT + ;; + esac + echo "date=$(date +%Y%m%d 2> /dev/null || true)" >> $GITHUB_OUTPUT + shell: bash + + - name: Cache PIP Packages + uses: actions/cache@v3 + id: cache + with: + path: ${{ steps.os.outputs.pip-cache }} + key: ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }} + restore-keys: | + ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}- + ${{ inputs.os }}-pip-test-${{ inputs.python-version }}- + ${{ inputs.os }}-pip-test- + + - name: Install Python dependencies + run: | + python3 -V + python3 -m pip freeze | sort + python3 -m pip cache info || true + python3 -m pip cache list || true + python3 -m pip install --upgrade --force pip wheel + python3 -m pip install --force -r python/requirements.txt + python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt + python3 -m pip freeze | sort + python3 -m pip cache info || true + python3 -m pip cache list || true + shell: bash + + - name: Update expectation files + id: changes + continue-on-error: true + run: | + python/test/files/update_expectations.sh + git status + + if ! git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]] + then + # we only upload the changed files if we can find zip + if which zip + then + (git diff --name-only && git ls-files -o --exclude-standard) | xargs -d "\n" zip changed-expectations.zip + exit 1 + fi + fi + shell: bash + - name: Upload changed expectation files + if: steps.changes.outcome == 'failure' + uses: actions/upload-artifact@v3 + with: + name: Changed expectations + path: changed-expectations.zip + if-no-files-found: error + + - name: PyTest + env: + PYTHONPATH: .. + run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml + shell: bash + + - name: PyTest (EST) + env: + TZ: US/Eastern + LANG: "en_US.UTF-8" + PYTHONPATH: .. + run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml + shell: bash + + - name: PyTest (CET) + env: + TZ: Europe/Berlin + LANG: "de_DE.UTF-8" + PYTHONPATH: .. 
+ run: | + cd python/test + python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml + shell: bash + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v3 + with: + name: Test Results (python-${{ inputs.python-version }}, ${{ inputs.os }}) + path: | + test-results/*.xml + unit-test-results.json diff --git a/.github/workflows/badges.yml b/.github/workflows/badges.yml index af1a1334..0bcfa397 100644 --- a/.github/workflows/badges.yml +++ b/.github/workflows/badges.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get package downloads id: downloads @@ -32,6 +32,10 @@ jobs: color: blue path: downloads.svg + - name: Create JSON + run: | + echo '{"subject": "Docker pulls", "status": "${{ steps.downloads.outputs.total_downloads }} (${{ steps.downloads.outputs.recent_downloads_per_day }}/day)", "color": "blue"}' > downloads.json + - name: Upload badge to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: @@ -39,6 +43,13 @@ jobs: gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 file: downloads.svg + - name: Upload JSON to Gist + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + file: downloads.json + workflows: name: Dependent workflows runs-on: ubuntu-latest @@ -47,7 +58,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Fetch workflows id: workflows @@ -61,9 +72,20 @@ jobs: color: blue path: workflows.svg + - name: Create JSON + run: | + echo '{"subject": "GitHub Workflows", "status": "${{ steps.workflows.outputs.total_workflows }}", "color": "blue"}' > workflows.json + - name: Upload badge to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 file: workflows.svg + + - name: Upload JSON to Gist + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + file: workflows.json diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index d43dce31..ec584696 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check requirements.txt against requirements-direct.txt run: | (diff -w python/requirements-direct.txt python/requirements.txt || true) | (! 
grep -e "^<") @@ -30,567 +30,41 @@ jobs: .github/upgrade-pip-packages.sh shell: bash - test: - name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }}) - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: - - macos-11 - - macos-12 - - macos-latest - - ubuntu-20.04 - - ubuntu-22.04 - - ubuntu-latest - - windows-2019 - - windows-2022 - - windows-latest - python-version: ["3.8", "installed"] + test-mac: + name: "Test macOS" + uses: "./.github/workflows/test-os.yml" + with: + os: '["macos-11", "macos-12", "macos-13"]' - include: - - os: macos-latest - python-version: "3.11" - - os: ubuntu-latest - python-version: "3.11" - # installing lxml fails for Python 3.11 on Windows + test-lnx: + name: "Test Ubuntu" + uses: "./.github/workflows/test-os.yml" + with: + os: '["ubuntu-20.04", "ubuntu-22.04"]' - - os: macos-latest - python-version: "3.10" - - os: ubuntu-latest - python-version: "3.10" - - os: windows-latest - python-version: "3.10" + test-win: + name: "Test Windows" + uses: "./.github/workflows/test-os.yml" + with: + os: '["windows-2019", "windows-2022"]' - - os: macos-latest - python-version: "3.9" - - os: ubuntu-latest - python-version: "3.9" - - os: windows-latest - python-version: "3.9" - - - os: macos-latest - python-version: "3.7" - - os: ubuntu-latest - python-version: "3.7" - - os: windows-latest - python-version: "3.7" - - - steps: - - name: Setup Ubuntu - if: startsWith(matrix.os, 'ubuntu') - run: | - sudo apt-get update - sudo apt-get install language-pack-en language-pack-de - shell: bash - - - name: Setup Python - if: matrix.python-version != 'installed' - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Checkout - uses: actions/checkout@v3 - - - name: Detect OS - id: os - env: - OS: ${{ matrix.os }} - run: | - case "$OS" in - ubuntu*) - echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT - ;; - macos*) - echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT - ;; - windows*) - echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT - ;; - esac - echo "date=$(date +%Y%m%d 2> /dev/null || true)" >> $GITHUB_OUTPUT - shell: bash - - - name: Cache PIP Packages - uses: actions/cache@v3 - id: cache - with: - path: ${{ steps.os.outputs.pip-cache }} - key: ${{ matrix.os }}-pip-test-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }} - restore-keys: | - ${{ matrix.os }}-pip-test-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}- - ${{ matrix.os }}-pip-test-${{ matrix.python-version }}- - ${{ matrix.os }}-pip-test- - - - name: Install Python dependencies - run: | - python3 -V - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true - python3 -m pip install --upgrade --force pip wheel - python3 -m pip install --force -r python/requirements.txt - python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true - shell: bash - - - name: Update expectation files - id: changes - continue-on-error: true - run: | - python/test/files/update_expectations.sh - git status - - if ! 
git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]] - then - zip changes.zip $(git diff --name-only) $(git ls-files -o --exclude-standard) - exit 1 - fi - shell: bash - - name: Upload changed expectation files - if: steps.changes.outcome == 'failure' - uses: actions/upload-artifact@v3 - with: - name: Changed expectations - path: changed-expectations.zip - - - name: PyTest - env: - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml - shell: bash - - - name: PyTest (EST) - env: - TZ: US/Eastern - LANG: "en_US.UTF-8" - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml - shell: bash - - - name: PyTest (CET) - env: - TZ: Europe/Berlin - LANG: "de_DE.UTF-8" - PYTHONPATH: .. - run: | - cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml - shell: bash - - - name: Upload Test Results - if: always() - uses: actions/upload-artifact@v3 - with: - name: Test Results (python-${{ matrix.python-version }}, ${{ matrix.os }}) - path: | - test-results/*.xml - unit-test-results.json - - publish-dockerfile: - name: Publish Test Results (Dockerfile) - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: ./ - with: - check_name: Test Results (Dockerfile) - files: "artifacts/**/*.xml" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-docker-image: - name: Publish Test Results (Docker Image) - needs: test + publish: + name: "Publish" + needs: [test-mac, test-lnx, test-win] # we run the action from this branch whenever we can (when it runs in our repo's context) if: > - always() && + ! 
cancelled() && github.event.sender.login != 'dependabot[bot]' && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest + uses: "./.github/workflows/publish.yml" permissions: checks: write pull-requests: write security-events: write - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build Docker image - id: build - uses: docker/build-push-action@v4 - with: - load: true - push: false - tags: enricomi/publish-unit-test-result-action:latest - outputs: type=docker - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Publish Test Results - id: test-results - if: always() - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - INPUT_CHECK_NAME: Test Results (Docker Image) - INPUT_FILES: "artifacts/**/*.xml" - INPUT_JSON_FILE: "tests.json" - INPUT_JSON_SUITE_DETAILS: true - INPUT_JSON_TEST_CASE_RESULTS: true - INPUT_REPORT_SUITE_LOGS: "any" - run: | - docker run \ - --workdir $GITHUB_WORKSPACE \ - --rm \ - -e "INPUT_CHECK_NAME" \ - -e "INPUT_JSON_FILE" \ - -e "INPUT_JSON_SUITE_DETAILS" \ - -e "INPUT_JSON_TEST_CASE_RESULTS" \ - -e "INPUT_LOG_LEVEL" \ - -e "INPUT_ROOT_LOG_LEVEL" \ - -e "INPUT_GITHUB_TOKEN" \ - -e "INPUT_GITHUB_TOKEN_ACTOR" \ - -e "INPUT_GITHUB_RETRIES" \ - -e "INPUT_COMMIT" \ - -e "INPUT_COMMENT_TITLE" \ - -e "INPUT_COMMENT_MODE" \ - -e "INPUT_FAIL_ON" \ - -e "INPUT_ACTION_FAIL" \ - -e "INPUT_ACTION_FAIL_ON_INCONCLUSIVE" \ - -e "INPUT_FILES" \ - -e "INPUT_JUNIT_FILES" \ - -e "INPUT_NUNIT_FILES" \ - -e "INPUT_XUNIT_FILES" \ - -e "INPUT_TRX_FILES" \ - -e "INPUT_TIME_UNIT" \ - -e "INPUT_REPORT_INDIVIDUAL_RUNS" \ - -e "INPUT_REPORT_SUITE_LOGS" \ - -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ - -e "INPUT_LARGE_FILES" \ - -e "INPUT_IGNORE_RUNS" \ - -e "INPUT_JOB_SUMMARY" \ - -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ - -e "INPUT_PULL_REQUEST_BUILD" \ - -e "INPUT_EVENT_FILE" \ - -e "INPUT_EVENT_NAME" \ - -e "INPUT_TEST_CHANGES_LIMIT" \ - -e "INPUT_CHECK_RUN_ANNOTATIONS" \ - -e "INPUT_CHECK_RUN_ANNOTATIONS_BRANCH" \ - -e "INPUT_SECONDS_BETWEEN_GITHUB_READS" \ - -e "INPUT_SECONDS_BETWEEN_GITHUB_WRITES" \ - -e "INPUT_SECONDARY_RATE_LIMIT_WAIT_SECONDS" \ - -e "INPUT_JSON_THOUSANDS_SEPARATOR" \ - -e "INPUT_SEARCH_PULL_REQUESTS" \ - -e "HOME" \ - -e "GITHUB_JOB" \ - -e "GITHUB_REF" \ - -e "GITHUB_SHA" \ - -e "GITHUB_REPOSITORY" \ - -e "GITHUB_REPOSITORY_OWNER" \ - -e "GITHUB_RUN_ID" \ - -e "GITHUB_RUN_NUMBER" \ - -e "GITHUB_RETENTION_DAYS" \ - -e "GITHUB_RUN_ATTEMPT" \ - -e "GITHUB_ACTOR" \ - -e "GITHUB_TRIGGERING_ACTOR" \ - -e "GITHUB_WORKFLOW" \ - -e "GITHUB_HEAD_REF" \ - -e "GITHUB_BASE_REF" \ - -e "GITHUB_EVENT_NAME" \ - -e "GITHUB_SERVER_URL" \ - -e "GITHUB_API_URL" \ - -e "GITHUB_GRAPHQL_URL" \ - -e "GITHUB_REF_NAME" \ - -e "GITHUB_REF_PROTECTED" \ - -e "GITHUB_REF_TYPE" \ - -e "GITHUB_WORKSPACE" \ - -e "GITHUB_ACTION" \ - -e "GITHUB_EVENT_PATH" \ - -e "GITHUB_ACTION_REPOSITORY" \ - -e "GITHUB_ACTION_REF" \ - -e "GITHUB_PATH" \ - -e "GITHUB_ENV" \ - -e "GITHUB_STEP_SUMMARY" \ - -e "GITHUB_STATE" \ - -e "GITHUB_OUTPUT" \ - -e "RUNNER_OS" \ - -e "RUNNER_ARCH" \ - -e "RUNNER_NAME" \ - -e "RUNNER_TOOL_CACHE" \ - -e "RUNNER_TEMP" \ - -e "RUNNER_WORKSPACE" \ - -e "ACTIONS_RUNTIME_URL" \ - -e "ACTIONS_RUNTIME_TOKEN" \ - -e "ACTIONS_CACHE_URL" \ - -e GITHUB_ACTIONS=true \ - -e CI=true \ - -v "$RUNNER_TEMP":"$RUNNER_TEMP" \ - -v 
"/var/run/docker.sock":"/var/run/docker.sock" \ - -v "/home/runner/work/_temp/_github_home":"/github/home" \ - -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ - -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ - -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ - enricomi/publish-unit-test-result-action:latest - shell: bash - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - - name: Scan for vulnerabilities - id: scan - uses: crazy-max/ghaction-container-scan@v2 - with: - image: enricomi/publish-unit-test-result-action:latest - dockerfile: ./Dockerfile - annotations: true - - name: Upload SARIF artifact - uses: actions/upload-artifact@v3 - with: - name: SARIF - path: ${{ steps.scan.outputs.sarif }} - - name: Upload SARIF file - if: always() && steps.scan.outputs.sarif != '' - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: ${{ steps.scan.outputs.sarif }} - - publish-composite: - name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ${{ matrix.os }} - permissions: - checks: write - pull-requests: write - - strategy: - fail-fast: false - max-parallel: 3 - matrix: - # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources - # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) - include: - - os: macos-latest - os-label: macOS - python: "3.8" - - os: macos-latest - os-label: macOS - python: "installed" - - os: macos-11 - os-label: macOS 11 - python: "installed" - - - os: ubuntu-latest - os-label: Linux - python: "3.8" - - os: ubuntu-latest - os-label: Linux - python: "installed" - - os: ubuntu-20.04 - os-label: Linux 20.04 - python: "installed" - - - os: windows-latest - os-label: Windows - python: "installed" - - os: windows-2019 - os-label: Windows 2019 - python: "installed" - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Setup Python - if: matrix.python != 'installed' - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python }} - - - name: Download Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Publish Test Results - id: test-results - uses: ./composite - with: - check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) - files: | - artifacts/**/*.xml - artifacts\**\*.xml - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-test-files: - name: Publish Test Files - # does not really depend on 'tests' but can be executed together with other publish tasks just for good taste - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || 
github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Copy test result files - run: cp -rv python/test/files test-files - shell: bash - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: ./ - with: - check_name: Test Results (Test Files) - fail_on: nothing - files: | - test-files/**/*.xml - test-files/**/*.trx - test-files/**/*.json - junit_files: "test-files/junit-xml/**/*.xml" - nunit_files: "test-files/nunit/**/*.xml" - xunit_files: "test-files/xunit/**/*.xml" - trx_files: "test-files/trx/**/*.trx" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - - publish-test-file: - name: Publish Test File - # does not really depend on 'tests' but can be executed together with other publish tasks just for good taste - needs: test - # we run the action from this branch whenever we can (when it runs in our repo's context) - if: > - always() && - github.event.sender.login != 'dependabot[bot]' && - ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository ) - runs-on: ubuntu-latest - permissions: - checks: write - pull-requests: write - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Copy test junit xml files - run: cp -rv python/test/files/junit-xml test-files - shell: bash - - - name: Prepare publish action from this branch - run: | - sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml - shell: bash - - - name: Publish Test Results - id: test-results - uses: ./ - with: - check_name: Test Results (Test File) - fail_on: nothing - files: "test-files/pytest/junit.gloo.standalone.xml" - json_file: "tests.json" - json_suite_details: true - json_test_case_results: true - report_suite_logs: "any" - log_level: DEBUG - - - name: JSON output - uses: ./misc/action/json-output - with: - json: '${{ steps.test-results.outputs.json }}' - json_file: 'tests.json' - config-deploy: name: Configure Deployment - needs: test + needs: [test-mac, test-lnx, test-win] # do not build or deploy on forked repositories if: github.repository_owner == 'EnricoMi' runs-on: ubuntu-latest @@ -601,7 +75,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Extract action image and version # we deploy from a specific commit on master (the one that mentions a new version the first time) @@ -627,7 +101,7 @@ jobs: deploy: name: Deploy to GitHub - needs: [test, publish-dockerfile, publish-docker-image, publish-composite, publish-test-file, publish-test-files, config-deploy] + needs: [publish, config-deploy] # do not build or deploy on forked repositories if: github.repository_owner == 'EnricoMi' @@ -635,7 +109,7 @@ jobs: steps: - name: Docker meta id: docker-meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: ghcr.io/EnricoMi/publish-unit-test-result-action flavor: | @@ -649,23 +123,24 @@ jobs: type=semver,pattern={{version}},value=${{ needs.config-deploy.outputs.image-version }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: 
docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.CR_PAT }} - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: tags: ${{ steps.docker-meta.outputs.tags }} labels: ${{ steps.docker-meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 pull: true # deploy image actions from commits pushed to master and # deploy Dockerfile actions from pushed version tags (no major versions) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3be9041d..0f994b24 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,11 +31,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -46,7 +46,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -60,4 +60,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..feaae1ea --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,376 @@ +name: Publish + +on: + workflow_call: + +jobs: + publish-dockerfile: + name: Publish Test Results (Dockerfile) + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Dockerfile) + files: "artifacts/**/*.xml" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-docker-image: + name: Publish Test Results (Docker Image ${{ matrix.arch }}) + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + security-events: write + strategy: + fail-fast: false + matrix: + arch: [amd64, arm64] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + image: tonistiigi/binfmt:latest + platforms: ${{ matrix.arch }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker image + id: build + uses: docker/build-push-action@v5 + with: + load: true + push: false + platforms: linux/${{ matrix.arch }} + 
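+          # Build a single-architecture image (amd64 or arm64, per the matrix) via QEMU and Buildx and
+          # load it into the local Docker daemon so the publish step below can `docker run` it;
+          # nothing is pushed from this job, multi-arch images are pushed by the deploy job.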
tags: enricomi/publish-unit-test-result-action:latest + outputs: type=docker + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + if: always() + env: + INPUT_GITHUB_TOKEN: ${{ github.token }} + INPUT_CHECK_NAME: Test Results (Docker Image ${{ matrix.arch }}) + INPUT_FILES: "artifacts/**/*.xml" + INPUT_JSON_FILE: "tests.json" + INPUT_JSON_SUITE_DETAILS: true + INPUT_JSON_TEST_CASE_RESULTS: true + INPUT_REPORT_SUITE_LOGS: "any" + run: | + docker run --platform linux/${{ matrix.arch }} \ + --workdir $GITHUB_WORKSPACE \ + --rm \ + -e "INPUT_CHECK_NAME" \ + -e "INPUT_JSON_FILE" \ + -e "INPUT_JSON_SUITE_DETAILS" \ + -e "INPUT_JSON_TEST_CASE_RESULTS" \ + -e "INPUT_LOG_LEVEL" \ + -e "INPUT_ROOT_LOG_LEVEL" \ + -e "INPUT_GITHUB_TOKEN" \ + -e "INPUT_GITHUB_TOKEN_ACTOR" \ + -e "INPUT_GITHUB_RETRIES" \ + -e "INPUT_COMMIT" \ + -e "INPUT_COMMENT_TITLE" \ + -e "INPUT_COMMENT_MODE" \ + -e "INPUT_FAIL_ON" \ + -e "INPUT_ACTION_FAIL" \ + -e "INPUT_ACTION_FAIL_ON_INCONCLUSIVE" \ + -e "INPUT_FILES" \ + -e "INPUT_JUNIT_FILES" \ + -e "INPUT_NUNIT_FILES" \ + -e "INPUT_XUNIT_FILES" \ + -e "INPUT_TRX_FILES" \ + -e "INPUT_TIME_UNIT" \ + -e "INPUT_TEST_FILE_PREFIX" \ + -e "INPUT_REPORT_INDIVIDUAL_RUNS" \ + -e "INPUT_REPORT_SUITE_LOGS" \ + -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ + -e "INPUT_LARGE_FILES" \ + -e "INPUT_IGNORE_RUNS" \ + -e "INPUT_CHECK_RUN" \ + -e "INPUT_JOB_SUMMARY" \ + -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ + -e "INPUT_PULL_REQUEST_BUILD" \ + -e "INPUT_EVENT_FILE" \ + -e "INPUT_EVENT_NAME" \ + -e "INPUT_TEST_CHANGES_LIMIT" \ + -e "INPUT_CHECK_RUN_ANNOTATIONS" \ + -e "INPUT_CHECK_RUN_ANNOTATIONS_BRANCH" \ + -e "INPUT_SECONDS_BETWEEN_GITHUB_READS" \ + -e "INPUT_SECONDS_BETWEEN_GITHUB_WRITES" \ + -e "INPUT_SECONDARY_RATE_LIMIT_WAIT_SECONDS" \ + -e "INPUT_JSON_THOUSANDS_SEPARATOR" \ + -e "INPUT_SEARCH_PULL_REQUESTS" \ + -e "HOME" \ + -e "GITHUB_JOB" \ + -e "GITHUB_REF" \ + -e "GITHUB_SHA" \ + -e "GITHUB_REPOSITORY" \ + -e "GITHUB_REPOSITORY_OWNER" \ + -e "GITHUB_RUN_ID" \ + -e "GITHUB_RUN_NUMBER" \ + -e "GITHUB_RETENTION_DAYS" \ + -e "GITHUB_RUN_ATTEMPT" \ + -e "GITHUB_ACTOR" \ + -e "GITHUB_TRIGGERING_ACTOR" \ + -e "GITHUB_WORKFLOW" \ + -e "GITHUB_HEAD_REF" \ + -e "GITHUB_BASE_REF" \ + -e "GITHUB_EVENT_NAME" \ + -e "GITHUB_SERVER_URL" \ + -e "GITHUB_API_URL" \ + -e "GITHUB_GRAPHQL_URL" \ + -e "GITHUB_REF_NAME" \ + -e "GITHUB_REF_PROTECTED" \ + -e "GITHUB_REF_TYPE" \ + -e "GITHUB_WORKSPACE" \ + -e "GITHUB_ACTION" \ + -e "GITHUB_EVENT_PATH" \ + -e "GITHUB_ACTION_REPOSITORY" \ + -e "GITHUB_ACTION_REF" \ + -e "GITHUB_PATH" \ + -e "GITHUB_ENV" \ + -e "GITHUB_STEP_SUMMARY" \ + -e "GITHUB_STATE" \ + -e "GITHUB_OUTPUT" \ + -e "RUNNER_OS" \ + -e "RUNNER_ARCH" \ + -e "RUNNER_NAME" \ + -e "RUNNER_TOOL_CACHE" \ + -e "RUNNER_TEMP" \ + -e "RUNNER_WORKSPACE" \ + -e "ACTIONS_RUNTIME_URL" \ + -e "ACTIONS_RUNTIME_TOKEN" \ + -e "ACTIONS_CACHE_URL" \ + -e GITHUB_ACTIONS=true \ + -e CI=true \ + -v "$RUNNER_TEMP":"$RUNNER_TEMP" \ + -v "/var/run/docker.sock":"/var/run/docker.sock" \ + -v "/home/runner/work/_temp/_github_home":"/github/home" \ + -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ + -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ + -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ + enricomi/publish-unit-test-result-action:latest + shell: bash + + - name: JSON output + uses: ./misc/action/json-output + with: + 
json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + - name: Scan for vulnerabilities + id: scan + uses: crazy-max/ghaction-container-scan@v3 + with: + image: enricomi/publish-unit-test-result-action:latest + dockerfile: ./Dockerfile + annotations: true + - name: Upload SARIF artifact + uses: actions/upload-artifact@v3 + with: + name: SARIF + path: ${{ steps.scan.outputs.sarif }} + - name: Upload SARIF file + if: always() && steps.scan.outputs.sarif != '' + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: ${{ steps.scan.outputs.sarif }} + + publish-composite: + name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + runs-on: ${{ matrix.os }} + permissions: + checks: write + pull-requests: write + + strategy: + fail-fast: false + max-parallel: 3 + matrix: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources + # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) + include: + - os: macos-latest + os-label: macOS + python: "3.8" + - os: macos-latest + os-label: macOS + python: "installed" + - os: macos-11 + os-label: macOS 11 + python: "installed" + + - os: ubuntu-latest + os-label: Linux + python: "3.8" + - os: ubuntu-latest + os-label: Linux + python: "installed" + - os: ubuntu-20.04 + os-label: Linux 20.04 + python: "installed" + + - os: windows-latest + os-label: Windows + python: "installed" + - os: windows-2019 + os-label: Windows 2019 + python: "installed" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python + if: matrix.python != 'installed' + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + uses: ./composite + with: + check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + files: | + artifacts/**/*.xml + artifacts\**\*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-test-files: + name: Publish Test Files + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Copy test result files + run: cp -rv python/test/files test-files + shell: bash + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Test Files) + fail_on: nothing + files: | + test-files/**/*.xml + test-files/**/*.trx + test-files/**/*.json + junit_files: "test-files/junit-xml/**/*.xml" + nunit_files: "test-files/nunit/**/*.xml" + xunit_files: "test-files/xunit/**/*.xml" + trx_files: "test-files/trx/**/*.trx" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-test-file: + name: Publish Test File + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + + steps: + 
- name: Checkout + uses: actions/checkout@v4 + + - name: Copy test junit xml files + run: cp -rv python/test/files/junit-xml test-files + shell: bash + + - name: Prepare publish action from this branch + run: | + sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml + shell: bash + + - name: Publish Test Results + id: test-results + uses: ./ + with: + check_name: Test Results (Test File) + fail_on: nothing + files: "test-files/pytest/junit.gloo.standalone.xml" + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + log_level: DEBUG + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' diff --git a/.github/workflows/test-os.yml b/.github/workflows/test-os.yml new file mode 100644 index 00000000..28482f2e --- /dev/null +++ b/.github/workflows/test-os.yml @@ -0,0 +1,31 @@ +name: Test OS + +on: + workflow_call: + inputs: + os: + required: true + type: string +jobs: + test: + name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: ${{ fromJson(inputs.os) }} + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12.0-rc.3", "installed"] + + include: + - os: ${{ fromJson(inputs.os)[0] }} + python-version: "3.7" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Test + uses: ./.github/actions/test + with: + os: ${{ matrix.os }} + python-version: ${{ matrix.python-version }} diff --git a/README.md b/README.md index 22bc3005..deac75c7 100644 --- a/README.md +++ b/README.md @@ -6,16 +6,13 @@ [![GitHub Workflows badge](https://gist.github.com/im-open/612cb538c14731f1a8fefe504f519395/raw/workflows.svg)](https://github.com/search?q=publish-unit-test-result-action+path%3A.github%2Fworkflows%2F+language%3AYAML+language%3AYAML&type=Code&l=YAML) [![Docker pulls badge](https://gist.github.com/im-open/612cb538c14731f1a8fefe504f519395/raw/downloads.svg)](https://github.com/users/im-open/packages/container/package/publish-unit-test-result-action) +![Arm badge](misc/badge-arm.svg) ![Ubuntu badge](misc/badge-ubuntu.svg) ![macOS badge](misc/badge-macos.svg) ![Windows badge](misc/badge-windows.svg) -![JUnit badge](misc/badge-junit-xml.svg) -![NUnit badge](misc/badge-nunit-xml.svg) -![XUnit badge](misc/badge-xunit-xml.svg) +![XML badge](misc/badge-xml.svg) ![TRX badge](misc/badge-trx.svg) -![Dart badge](misc/badge-dart.svg) -![Mocha badge](misc/badge-mocha.svg) - +![JS badge](misc/badge-js.svg) [![Test Results](https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395/raw/tests.svg)](https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395/raw/tests.svg) @@ -23,7 +20,8 @@ This [GitHub Action](https://github.com/actions) analyses test result files and publishes the results on GitHub. It supports [JSON (Dart, Mocha), TRX (MSTest, VS) and XML (JUnit, NUnit, XUnit) file formats](#generating-test-result-files), and runs on Linux, macOS and Windows. -You can add this action to your GitHub workflow for ![Ubuntu Linux](https://badgen.net/badge/icon/Ubuntu?icon=terminal&label) (e.g. `runs-on: ubuntu-latest`) runners: +You can use this action with ![Ubuntu Linux](misc/badge-ubuntu.svg) runners (e.g. 
`runs-on: ubuntu-latest`) +or ![ARM Linux](misc/badge-arm.svg) self-hosted runners: ```yaml - name: Publish Test Results @@ -36,8 +34,10 @@ You can add this action to your GitHub workflow for ![Ubuntu Linux](https://badg test-results/**/*.json ``` -Use this for ![macOS](https://badgen.net/badge/icon/macOS?icon=apple&label) (e.g. `runs-on: macos-latest`) -and ![Windows](https://badgen.net/badge/icon/Windows?icon=windows&label) (e.g. `runs-on: windows-latest`) runners: +See the [notes on running this action with absolute paths](#running-with-absolute-paths) if you cannot use relative test result file paths. + +Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`) +and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners: ```yaml - name: Publish Test Results @@ -60,9 +60,9 @@ The `if: always()` clause guarantees that this action always runs, even if earli When run multiple times in one workflow, the [option](#configuration) `check_name` has to be set to a unique value for each instance. Otherwise, the multiple runs overwrite each other's results. -***Note:** This action does not fail if tests failed. The action that executed the tests should -fail on test failure. The published results however indicate failure if tests fail or errors occur. -This behaviour is configurable.* +***Note:** By default, this action does not fail if tests failed. This can be [configured](#configuration) via `action_fail`. +The action that executed the tests should fail on test failure. The published results however indicate failure if tests fail or errors occur, +which can be [configured](#configuration) via `fail_on`.* ## Permissions @@ -94,14 +94,14 @@ Check your favorite development and test environment for its JSON, TRX file or J |Test Environment |Language| JUnit
XML | NUnit<br/>XML | XUnit<br/>XML | TRX<br/>file | JSON<br/>
file | |-----------------|:------:|:---------:|:---------:|:---------:|:---:|:---:| -|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | | :heavy_check_mark: | -|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:heavy_check_mark:| | | | | -|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:heavy_check_mark:| | | | | -|[Mocha](https://mochajs.org/#xunit)|JavaScript|:heavy_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| | :heavy_check_mark: | -|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:heavy_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | -|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:heavy_check_mark:| | | | | -|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:heavy_check_mark:| | | | | -|Your favorite
environment|Your favorite<br/>language|probably<br/>
:heavy_check_mark:| | | | | +|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | |:white_check_mark:| +|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:white_check_mark:| | | | | +|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:white_check_mark:| | | | | +|[Mocha](https://mochajs.org/#xunit)|JavaScript|:white_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| |:white_check_mark:| +|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:white_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:white_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | +|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:white_check_mark:| | | | | +|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:white_check_mark:| | | | | +|Your favorite
environment|Your favorite<br/>language|probably<br/>
:white_check_mark:| | | | | ## What is new in version 2 @@ -128,7 +128,7 @@ See workaround for `check_name`. ### Modes `create new` and `update last` removed for option `comment_mode` The action always updates an earlier pull request comment, which is the exact behaviour of mode `update last`. -The configuration options `create new` and `update last` are therefore removed. +The [configuration](#configuration) options `create new` and `update last` are therefore removed. **Impact:** An existing pull request comment is always updated. @@ -150,7 +150,7 @@ Set `comment_mode` to `always` (the default) or `off`. ## Publishing test results -Test results are published on GitHub at various (configurable) places: +Test results are published on GitHub at various ([configurable](#configuration)) places: - as [a comment](#pull-request-comment) in related pull requests - as [a check](#commit-and-pull-request-checks) in the checks section of a commit and related pull requests @@ -185,6 +185,8 @@ Those are highlighted in pull request comments to easily spot unintended test re ***Note:** This requires `check_run_annotations` to be set to `all tests, skipped tests`.* +Comments can be disabled with `comment_mode: off`. + ### Commit and pull request checks The checks section of a commit and related pull requests list a short summary (here `1 fail, 1 skipped, …`), @@ -198,6 +200,8 @@ Pull request checks: ![pull request checks example](misc/github-pull-request-checks.png) +Check runs can be disabled with `check_run: false`. + ### Commit and pull request annotations Each failing test produces an annotation with failure details in the checks section of a commit: @@ -208,8 +212,13 @@ and the changed files section of related pull requests: ![annotations example changed files](misc/github-pull-request-changes-annotation.png) +***Note:** Annotations for test files are only supported when test file paths in test result files are relative to the repository root. +Use option `test_file_prefix` to add a prefix to, or remove a prefix from these file paths. See [Configuration](#configuration) section for details.* + ***Note:** Only the first failure of a test is shown. If you want to see all failures, set `report_individual_runs: "true"`.* +Check run annotations can be disabled with `ignore_runs: true`. + ### GitHub Actions job summary The results are added to the job summary page of the workflow that runs this action: @@ -220,12 +229,16 @@ In presence of failures or errors, the job summary links to the respective [chec ***Note:** Job summary requires [GitHub Actions runner v2.288.0](https://github.com/actions/runner/releases/tag/v2.288.0) or above.* +Job summaries can be disabled with `job_summary: false`. + ### GitHub Actions check summary of a commit Test results are published in the GitHub Actions check summary of the respective commit: ![checks comment example](misc/github-checks-comment.png) +Check runs can be disabled with `check_run: false`. + ## The symbols [comment]: <> (This heading is linked to from method get_link_and_tooltip_label_md) @@ -233,7 +246,7 @@ The symbols have the following meaning: |Symbol|Meaning| |:----:|-------| -||A successful test or run| +|  :white_check_mark:|A successful test or run| ||A skipped test or run| ||A failed test or run| ||An erroneous test or run| @@ -260,7 +273,7 @@ The list of most notable options: |Option|Default Value|Description| |:-----|:-----:|:----------| -|`files`|_no default_|File patterns of test result files. Supports `*`, `**`, `?`, and `[]` character ranges. 
Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| +|`files`|_no default_|File patterns of test result files. Relative paths are known to work best, while the composite action [also works with absolute paths](#running-with-absolute-paths). Supports `*`, `**`, `?`, and `[]` character ranges. Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| |`check_name`|`"Test Results"`|An alternative name for the check result. Required to be unique for each instance in one workflow.| |`comment_title`|same as `check_name`|An alternative name for the pull request comment.| |`comment_mode`|`always`|The action posts comments to pull requests that are associated with the commit. Set to:
`always` - always comment<br/>`changes` - comment when changes w.r.t. the target branch exist<br/>`changes in failures` - when changes in the number of failures and errors exist<br/>`changes in errors` - when changes in the number of (only) errors exist<br/>`failures` - when failures or errors exist<br/>`errors` - when (only) errors exist<br/>
`off` - to not create pull request comments.| @@ -290,8 +303,10 @@ The list of most notable options: |Option|Default Value|Description| |:-----|:-----:|:----------| -|`time_unit`|`seconds`|Time values in the XML files have this unit. Supports `seconds` and `milliseconds`.| -|`job_summary`|`true`| Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| +|`time_unit`|`seconds`|Time values in the test result files have this unit. Supports `seconds` and `milliseconds`.| +|`test_file_prefix`|`none`|Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".| +|`check_run`|`true`|Set to `true`, the results are published as a check run, but it may not be associated with the workflow that ran this action.| +|`job_summary`|`true`|Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| |`compare_to_earlier_commit`|`true`|Test results are compared to results of earlier commits to show changes:
`false` - disable comparison, `true` - compare across commits.'| |`test_changes_limit`|`10`|Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of `0`.| |`report_individual_runs`|`false`|Individual runs of the same test may see different failures. Reports all individual failures when set `true`, and the first failure only otherwise.| @@ -340,7 +355,7 @@ Here is an example JSON: ```json { "title": "4 parse errors, 4 errors, 23 fail, 18 skipped, 227 pass in 39m 12s", - "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/im-open/publish-unit-test-result-action/blob/v1.0.0/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. ± Comparison against earlier commit d8ce4b6c.\n", + "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. ± Comparison against earlier commit d8ce4b6c.\n", "conclusion": "success", "stats": { "files": 24, @@ -397,7 +412,7 @@ is not easily available, e.g. when [creating a badge from test results](#create-
Access JSON via file -The optional `json_file` allows to configure a file where extended JSON information are to be written. +The optional `json_file` allows to [configure](#configuration) a file where extended JSON information are to be written. Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` contain more information than just the number of errors and annotations, respectively. @@ -765,6 +780,34 @@ Set the `gistURL` to the Gist that you want to write the badge file to, in the f You can then use the badge via this URL: https://gist.githubusercontent.com/{user}/{id}/raw/badge.svg
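For illustration, a minimal sketch of the upload step (modelled on this repository's own `badges.yml` workflow) could look like this; the `badge.svg` file produced by a preceding step and the `GIST_TOKEN` secret are assumptions of this example:

```yaml
- name: Upload badge to Gist
  uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d
  with:
    token: ${{ secrets.GIST_TOKEN }}
    gistURL: https://gist.githubusercontent.com/{user}/{id}
    file: badge.svg
```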
+## Running with absolute paths + +It is known that this action works best with relative paths (e.g. `test-results/**/*.xml`), +but most absolute paths (e.g. `/tmp/test-results/**/*.xml`) require to use the composite variant +of this action (`uses: EnricoMi/publish-unit-test-result-action/composite@v2`). + +If you have to use absolute paths with the non-composite variant of this action (`uses: EnricoMi/publish-unit-test-result-action@v2`), +you have to copy files to a relative path first, and then use the relative path: + +```yaml +- name: Copy Test Results + if: always() + run: | + cp -Lpr /tmp/test-results test-results + shell: bash + +- name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: always() + with: + files: | + test-results/**/*.xml + test-results/**/*.trx + test-results/**/*.json +``` + +Using the non-composite variant of this action is recommended as it starts up much quicker. + ## Running as a composite action Running this action as a composite action allows to run it on various operating systems as it @@ -827,13 +870,9 @@ publish-test-results: path: artifacts - name: Publish Test Results - uses: im-open/publish-unit-test-result-action/composite@v2 + uses: EnricoMi/publish-unit-test-result-action/composite@v2 with: files: "artifacts/**/*.xml" ``` -## Credits - -The initial GitHub action has been created by [EnricoMi](https://github.com/EnricoMi) at -[EnricoMi/publish-unit-test-result-action](https://github.com/EnricoMi/publish-unit-test-result-action). diff --git a/action.yml b/action.yml index 2bcf081a..fa2f717c 100644 --- a/action.yml +++ b/action.yml @@ -42,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Supports *, **, ?, and []. Use multiline string for multiple patterns. Patterns starting with ! exclude the matching files. There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' @@ -60,6 +60,9 @@ inputs: description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' default: 'seconds' required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false report_individual_runs: description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' required: false @@ -77,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' 
+ default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' @@ -135,14 +142,13 @@ inputs: description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' default: 'false' required: false - outputs: json: description: "Test results as JSON" runs: using: 'docker' - image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.9.0' + image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.12.0' branding: icon: 'check-circle' diff --git a/composite/action.yml b/composite/action.yml index dec0448a..f9651c11 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -42,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Supports *, **, ?, and []. Use multiline string for multiple patterns. Patterns starting with ! exclude the matching files. There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' @@ -60,6 +60,9 @@ inputs: description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' default: 'seconds' required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false report_individual_runs: description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' required: false @@ -77,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' @@ -135,6 +142,7 @@ inputs: description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' 
default: 'false' required: false + outputs: json: description: "Test results as JSON" @@ -182,6 +190,7 @@ runs: ;; Windows*) echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT + echo "pip-options=--user" >> $GITHUB_OUTPUT ;; esac shell: bash @@ -192,14 +201,49 @@ runs: continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-d81e5b217e041ea3f958821f6daca2f5 + key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-fc884bb0b8d89fb24ccb9a84a3d97821 + + - name: Create virtualenv + id: venv + continue-on-error: true + env: + PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} + run: | + echo '##[group]Create virtualenv' + # install virtualenv, if it is not yet installed + python3 -m pip install $PIP_OPTIONS virtualenv + python3 -m virtualenv enricomi-publish-action-venv + # test activating virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac + which python3 + echo '##[endgroup]' + shell: bash - name: Install Python dependencies + env: + PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} run: | echo '##[group]Install Python dependencies' + if [ "${{ steps.venv.outcome }}" == "success" ] + then + # activate virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac + fi + which python3 + # make sure wheel is installed, which improves installing our dependencies - python3 -m pip install wheel - python3 -m pip install -r $GITHUB_ACTION_PATH/../python/requirements.txt + python3 -m pip install $PIP_OPTIONS wheel + python3 -m pip install $PIP_OPTIONS -r $GITHUB_ACTION_PATH/../python/requirements.txt echo '##[endgroup]' shell: bash @@ -207,6 +251,13 @@ runs: id: test-results run: | echo '##[group]Publish Test Results' + # activate virtualenv + case "$RUNNER_OS" in + Linux*|macOS*) + source enricomi-publish-action-venv/bin/activate;; + Windows*) + source enricomi-publish-action-venv\\Scripts\\activate;; + esac python3 $GITHUB_ACTION_PATH/../python/publish_test_results.py echo '##[endgroup]' env: @@ -226,6 +277,7 @@ runs: XUNIT_FILES: ${{ inputs.xunit_files }} TRX_FILES: ${{ inputs.trx_files }} TIME_UNIT: ${{ inputs.time_unit }} + TEST_FILE_PREFIX: ${{ inputs.test_file_prefix }} REPORT_INDIVIDUAL_RUNS: ${{ inputs.report_individual_runs }} REPORT_SUITE_LOGS: ${{ inputs.report_suite_logs }} DEDUPLICATE_CLASSES_BY_FILE_NAME: ${{ inputs.deduplicate_classes_by_file_name }} @@ -245,6 +297,7 @@ runs: JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }} JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} + CHECK_RUN: ${{ inputs.check_run }} JOB_SUMMARY: ${{ inputs.job_summary }} SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }} # not documented diff --git a/misc/badge-arm.svg b/misc/badge-arm.svg new file mode 100644 index 00000000..2fba6119 --- /dev/null +++ b/misc/badge-arm.svg @@ -0,0 +1,20 @@ + + ARM + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-dart.svg b/misc/badge-dart.svg deleted file mode 100644 index 42ee1507..00000000 --- a/misc/badge-dart.svg +++ /dev/null @@ -1,20 +0,0 @@ - - Dart: JSON - - - - - - - - - - - - - \ No newline at end of file diff --git 
a/misc/badge-js.svg b/misc/badge-js.svg new file mode 100644 index 00000000..c87b6b0b --- /dev/null +++ b/misc/badge-js.svg @@ -0,0 +1,20 @@ + + JS / Dart / Mocha: JSON + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-junit-xml.svg b/misc/badge-junit-xml.svg deleted file mode 100644 index 90ac0c64..00000000 --- a/misc/badge-junit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - JUnit: XML - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-mocha.svg b/misc/badge-mocha.svg deleted file mode 100644 index 169af1a6..00000000 --- a/misc/badge-mocha.svg +++ /dev/null @@ -1,20 +0,0 @@ - - Mocha: JSON - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-nunit-xml.svg b/misc/badge-nunit-xml.svg deleted file mode 100644 index aa954cb0..00000000 --- a/misc/badge-nunit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - NUnit: XML - - - - - - - - - - - - - \ No newline at end of file diff --git a/misc/badge-trx.svg b/misc/badge-trx.svg index 7d15af67..010c5b2d 100644 --- a/misc/badge-trx.svg +++ b/misc/badge-trx.svg @@ -1,20 +1,20 @@ - - TRX: OK - + + .Net: TRX + - - - - - + + + + + \ No newline at end of file diff --git a/misc/badge-xml.svg b/misc/badge-xml.svg new file mode 100644 index 00000000..f935934d --- /dev/null +++ b/misc/badge-xml.svg @@ -0,0 +1,20 @@ + + JUnit / NUnit / XUnit: XML + + + + + + + + + + + + + \ No newline at end of file diff --git a/misc/badge-xunit-xml.svg b/misc/badge-xunit-xml.svg deleted file mode 100644 index 752b78cc..00000000 --- a/misc/badge-xunit-xml.svg +++ /dev/null @@ -1,20 +0,0 @@ - - XUnit: XML - - - - - - - - - - - - - \ No newline at end of file diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 4bec0ce9..05e70d36 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -5,20 +5,20 @@ import re from collections import defaultdict from dataclasses import dataclass -from typing import List, Any, Union, Optional, Tuple, Mapping, Iterator, Set, Iterable +from typing import List, Any, Union, Optional, Tuple, Mapping, Iterator, Set, Iterable, Dict from publish.unittestresults import Numeric, UnitTestSuite, UnitTestCaseResults, UnitTestRunResults, \ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError # keep the version in sync with action.yml -__version__ = 'v2.9.0' +__version__ = 'v2.12.0' logger = logging.getLogger('publish') digest_prefix = '[test-results]:data:' digest_mime_type = 'application/gzip' digest_encoding = 'base64' digest_header = f'{digest_prefix}{digest_mime_type};{digest_encoding},' -digit_space = '  ' +digit_space = ' ' punctuation_space = ' ' comment_mode_off = 'off' @@ -151,6 +151,24 @@ def removed_skips(self) -> Optional[Set[str]]: return skipped_before.intersection(removed) +def get_json_path(json: Dict[str, Any], path: Union[str, List[str]]) -> Any: + if isinstance(path, str): + path = path.split('.') + + if path[0] not in json: + return None + + elem = json[path[0]] + + if len(path) > 1: + if isinstance(elem, dict): + return get_json_path(elem, path[1:]) + else: + return None + else: + return elem + + def utf8_character_length(c: int) -> int: if c >= 0x00010000: return 4 @@ -428,11 +446,11 @@ def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str: all_tests_label_md = 'tests' -passed_tests_label_md = get_link_and_tooltip_label_md(':heavy_check_mark:', 'passed tests') -skipped_tests_label_md = get_link_and_tooltip_label_md(':zzz:', 'skipped / disabled tests') -failed_tests_label_md = 
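For illustration only (not part of the patch): how the get_json_path helper added above behaves, using a hypothetical event payload; it returns None for missing keys instead of raising, and replaces the chained .get() calls in publisher.py further down.

    from publish import get_json_path  # helper added by this patch

    event = {'pull_request': {'base': {'repo': {'full_name': 'octocat/hello-world'}}}}

    get_json_path(event, 'pull_request.base.repo.full_name')  # 'octocat/hello-world'
    get_json_path(event, 'pull_request.head.sha')             # None, key is missing

    # equivalent to the chained form this patch removes from publisher.py:
    event.get('pull_request', {}).get('base', {}).get('repo', {}).get('full_name')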
get_link_and_tooltip_label_md(':x:', 'failed tests') -test_errors_label_md = get_link_and_tooltip_label_md(':fire:', 'test errors') -duration_label_md = get_link_and_tooltip_label_md(':stopwatch:', 'duration of all tests') +passed_tests_label_md = ':white_check_mark:' +skipped_tests_label_md = ':zzz:' +failed_tests_label_md = ':x:' +test_errors_label_md = ':fire:' +duration_label_md = ':stopwatch:' def get_short_summary_md(stats: UnitTestRunResultsOrDeltaResults) -> str: @@ -619,7 +637,7 @@ def get_long_summary_with_runs_md(stats: UnitTestRunResultsOrDeltaResults, runs_error=as_stat_number(stats.runs_error, error_digits, error_delta_digits, test_errors_label_md) ) if get_magnitude(stats.runs_error) else '' runs_line = '{runs} {runs_succ} {runs_skip} {runs_fail}{runs_error_part}\n'.format( - runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), + runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), runs_succ=as_stat_number(stats.runs_succ, success_digits, success_delta_digits, passed_tests_label_md), runs_skip=as_stat_number(stats.runs_skip, skip_digits, skip_delta_digits, skipped_tests_label_md), runs_fail=as_stat_number(stats.runs_fail, fail_digits, fail_delta_digits, failed_tests_label_md), diff --git a/python/publish/junit.py b/python/publish/junit.py index c300b434..79a348c1 100644 --- a/python/publish/junit.py +++ b/python/publish/junit.py @@ -175,7 +175,24 @@ def parse(path: str) -> JUnitTree: return progress_safe_parse_xml_file(files, parse, progress) -def process_junit_xml_elems(trees: Iterable[ParsedJUnitFile], time_factor: float = 1.0, add_suite_details: bool = False) -> ParsedUnitTestResults: +def adjust_prefix(file: Optional[str], prefix: Optional[str]) -> Optional[str]: + if prefix is None or file is None: + return file + + # prefix starts either with '+' or '-' + if prefix.startswith('+'): + # add prefix + return "".join([prefix[1:], file]) + + # remove prefix + return file[len(prefix)-1:] if file.startswith(prefix[1:]) else file + + +def process_junit_xml_elems(trees: Iterable[ParsedJUnitFile], + *, + time_factor: float = 1.0, + test_file_prefix: Optional[str] = None, + add_suite_details: bool = False) -> ParsedUnitTestResults: def create_junitxml(filepath: str, tree: JUnitTree) -> JUnitXmlOrParseError: try: instance = JUnitXml.fromroot(tree.getroot()) @@ -265,7 +282,7 @@ def get_text(elem, tag): cases = [ UnitTestCase( result_file=result_file, - test_file=case._elem.get('file'), + test_file=adjust_prefix(case._elem.get('file'), test_file_prefix), line=int_opt(case._elem.get('line')), class_name=case.classname, test_name=case.name, diff --git a/python/publish/publisher.py b/python/publish/publisher.py index d28ecd71..2b5ec7c8 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -13,7 +13,7 @@ from github.PullRequest import PullRequest from github.IssueComment import IssueComment -from publish import __version__, comment_mode_off, digest_prefix, restrict_unicode_list, \ +from publish import __version__, get_json_path, comment_mode_off, digest_prefix, restrict_unicode_list, \ comment_mode_always, comment_mode_changes, comment_mode_changes_failures, comment_mode_changes_errors, \ comment_mode_failures, comment_mode_errors, \ get_stats_from_digest, digest_header, get_short_summary, get_long_summary_md, \ @@ -54,10 +54,12 @@ class Settings: nunit_files_glob: Optional[str] xunit_files_glob: Optional[str] trx_files_glob: Optional[str] + test_file_prefix: Optional[str] time_factor: float check_name: str 
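For illustration only (not part of the patch): how the adjust_prefix helper added above applies the test_file_prefix examples from the action description ("+src/", "-/opt/actions-runner"); the file paths are hypothetical.

    from publish.junit import adjust_prefix  # helper added by this patch

    adjust_prefix('test/test_spark.py', '+src/')                 # 'src/test/test_spark.py'
    adjust_prefix('/opt/actions-runner/test/test_spark.py',
                  '-/opt/actions-runner')                        # '/test/test_spark.py'
    adjust_prefix('test/test_spark.py', '-/opt/actions-runner')  # unchanged, prefix not present
    adjust_prefix(None, '+src/')                                 # None passes through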
comment_title: str comment_mode: str + check_run: bool job_summary: bool compare_earlier: bool pull_request_build: str @@ -195,21 +197,22 @@ def publish(self, if logger.isEnabledFor(logging.DEBUG): logger.debug(f'Publishing {stats}') - if self._settings.is_fork: - # running on a fork, we cannot publish the check, but we can still read before_check_run - # bump the version if you change the target of this link (if it did not exist already) or change the section - logger.info('This action is running on a pull_request event for a fork repository. ' - 'Pull request comments and check runs cannot be created, so disabling these features. ' - 'To fully run the action on fork repository pull requests, see ' - f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') - check_run = None - before_check_run = None - if self._settings.compare_earlier: - before_commit_sha = self._settings.event.get('before') - logger.debug(f'comparing against before={before_commit_sha}') - before_check_run = self.get_check_run(before_commit_sha) - else: - check_run, before_check_run = self.publish_check(stats, cases, conclusion) + check_run = None + before_check_run = None + if self._settings.check_run: + if self._settings.is_fork: + # running on a fork, we cannot publish the check, but we can still read before_check_run + # bump the version if you change the target of this link (if it did not exist already) or change the section + logger.info('This action is running on a pull_request event for a fork repository. ' + 'Pull request comments and check runs cannot be created, so disabling these features. ' + 'To fully run the action on fork repository pull requests, see ' + f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') + if self._settings.compare_earlier: + before_commit_sha = get_json_path(self._settings.event, 'before') + logger.debug(f'comparing against before={before_commit_sha}') + before_check_run = self.get_check_run(before_commit_sha) + else: + check_run, before_check_run = self.publish_check(stats, cases, conclusion) if self._settings.job_summary: self.publish_job_summary(self._settings.comment_title, stats, check_run, before_check_run) @@ -226,8 +229,8 @@ def publish(self, logger.info('Commenting on pull requests disabled') def get_pull_from_event(self) -> Optional[PullRequest]: - number = self._settings.event.get('pull_request', {}).get('number') - repo = self._settings.event.get('pull_request', {}).get('base', {}).get('repo', {}).get('full_name') + number = get_json_path(self._settings.event, 'pull_request.number') + repo = get_json_path(self._settings.event, 'pull_request.base.repo.full_name') if number is None or repo is None or repo != self._settings.repo: return None @@ -389,7 +392,7 @@ def publish_check(self, before_stats = None before_check_run = None if self._settings.compare_earlier: - before_commit_sha = self._settings.event.get('before') + before_commit_sha = get_json_path(self._settings.event, 'before') logger.debug(f'comparing against before={before_commit_sha}') before_check_run = self.get_check_run(before_commit_sha) before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None @@ -467,7 +470,7 @@ def publish_json(self, data: PublishData): def publish_job_summary(self, title: str, stats: UnitTestRunResults, - check_run: CheckRun, + check_run: Optional[CheckRun], 
before_check_run: Optional[CheckRun]): before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats @@ -685,7 +688,7 @@ def get_base_commit_sha(self, pull_request: PullRequest) -> Optional[str]: if self._settings.event: # for pull request events we take the other parent of the merge commit (base) if self._settings.event_name == 'pull_request': - return self._settings.event.get('pull_request', {}).get('base', {}).get('sha') + return get_json_path(self._settings.event, 'pull_request.base.sha') # for workflow run events we should take the same as for pull request events, # but we have no way to figure out the actual merge commit and its parents # we do not take the base sha from pull_request as it is not immutable @@ -727,18 +730,13 @@ def get_pull_request_comments(self, pull: PullRequest, order_by_updated: bool) - "POST", self._settings.graphql_url, input=query ) - return data \ - .get('data', {}) \ - .get('repository', {}) \ - .get('pullRequest', {}) \ - .get('comments', {}) \ - .get('nodes') + return get_json_path(data, 'data.repository.pullRequest.comments.nodes') def get_action_comments(self, comments: List[Mapping[str, Any]], is_minimized: Optional[bool] = False): comment_body_start = f'## {self._settings.comment_title}\n' comment_body_indicators = ['\nresults for commit ', '\nResults for commit '] return list([comment for comment in comments - if comment.get('author', {}).get('login') == self._settings.actor + if get_json_path(comment, 'author.login') == self._settings.actor and (is_minimized is None or comment.get('isMinimized') == is_minimized) and comment.get('body', '').startswith(comment_body_start) and any(indicator in comment.get('body', '') for indicator in comment_body_indicators)]) diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 537f858c..9bf3dd11 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -4,6 +4,7 @@ import re import sys from glob import glob +from pathlib import Path from typing import List, Optional, Union, Mapping, Tuple, Any, Iterable, Callable import github @@ -55,7 +56,7 @@ def get_github(auth: github.Auth, seconds_between_writes=seconds_between_writes) -def get_files(multiline_files_globs: str) -> List[str]: +def get_files(multiline_files_globs: str) -> Tuple[List[str], bool]: multiline_files_globs = re.split('\r?\n\r?', multiline_files_globs) included = {str(file) for files_glob in multiline_files_globs @@ -65,7 +66,10 @@ def get_files(multiline_files_globs: str) -> List[str]: for files_glob in multiline_files_globs if files_glob.startswith('!') for file in glob(files_glob[1:], recursive=True)} - return list(included - excluded) + has_absolute = any({Path(pattern).is_absolute() + for files_glob in multiline_files_globs + for pattern in [files_glob[1:] if files_glob.startswith('!') else files_glob]}) + return list(included - excluded), has_absolute def prettify_glob_pattern(pattern: Optional[str]) -> Optional[str]: @@ -77,12 +81,15 @@ def expand_glob(pattern: Optional[str], file_format: Optional[str], gha: GithubA if not pattern: return [] - files = get_files(pattern) + files, has_absolute_patterns = get_files(pattern) file_format = f' {file_format}' if file_format else '' prettyfied_pattern = prettify_glob_pattern(pattern) if len(files) == 0: gha.warning(f'Could not find any{file_format} files for {prettyfied_pattern}') + if 
has_absolute_patterns: + gha.warning(f'Your file pattern contains absolute paths, please read the notes on absolute paths:') + gha.warning(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths') else: logger.info(f'Reading{file_format} files {prettyfied_pattern} ({get_number_of_files(files)}, {get_files_size(files)})') logger.debug(f'reading{file_format} files {list(files)}') @@ -208,6 +215,7 @@ def parse_files(settings: Settings, gha: GithubAction) -> ParsedUnitTestResultsW return process_junit_xml_elems( elems, time_factor=settings.time_factor, + test_file_prefix=settings.test_file_prefix, add_suite_details=settings.report_suite_out_logs or settings.report_suite_err_logs or settings.json_suite_details ).with_commit(settings.commit) @@ -219,7 +227,7 @@ def log_parse_errors(errors: List[ParseError], gha: GithubAction): def action_fail_required(conclusion: str, action_fail: bool, action_fail_on_inconclusive: bool) -> bool: return action_fail and conclusion == 'failure' or \ - action_fail_on_inconclusive and conclusion == 'inconclusive' + action_fail_on_inconclusive and conclusion == 'neutral' def main(settings: Settings, gha: GithubAction) -> None: @@ -261,9 +269,8 @@ def main(settings: Settings, gha: GithubAction) -> None: Publisher(settings, gh, gha).publish(stats, results.case_results, conclusion) if action_fail_required(conclusion, settings.action_fail, settings.action_fail_on_inconclusive): - gha.error(f'This action finished successfully, but test results have status {conclusion}.') - gha.error(f'Configuration requires this action to fail (action_fail={settings.action_fail}, ' - f'action_fail_on_inconclusive={settings.action_fail_on_inconclusive}).') + status = f"{conclusion} / inconclusive" if conclusion == "neutral" else conclusion + gha.error(f'This action finished successfully, but test results have status {status}.') sys.exit(1) @@ -390,6 +397,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: event = json.load(f) repo = get_var('GITHUB_REPOSITORY', options) + check_run = get_bool_var('CHECK_RUN', options, default=True) job_summary = get_bool_var('JOB_SUMMARY', options, default=True) comment_mode = get_var('COMMENT_MODE', options) or comment_mode_always @@ -464,9 +472,11 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: xunit_files_glob=get_var('XUNIT_FILES', options), trx_files_glob=get_var('TRX_FILES', options), time_factor=time_factor, + test_file_prefix=get_var('TEST_FILE_PREFIX', options) or None, check_name=check_name, comment_title=get_var('COMMENT_TITLE', options) or check_name, comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=get_bool_var('COMPARE_TO_EARLIER_COMMIT', options, default=True), pull_request_build=get_var('PULL_REQUEST_BUILD', options) or 'merge', @@ -481,12 +491,16 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: seconds_between_github_reads=float(seconds_between_github_reads), seconds_between_github_writes=float(seconds_between_github_writes), secondary_rate_limit_wait_seconds=float(secondary_rate_limit_wait_seconds), - search_pull_requests=get_bool_var('SEARCH_PULL_REQUESTS', options, default=False) + search_pull_requests=get_bool_var('SEARCH_PULL_REQUESTS', options, default=False), ) check_var(settings.token, 'GITHUB_TOKEN', 'GitHub token') check_var(settings.repo, 'GITHUB_REPOSITORY', 'GitHub repository') check_var(settings.commit, 'COMMIT, GITHUB_SHA or event file', 'Commit SHA') + 
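For illustration only (not part of the patch): the updated action_fail_required expression above relies on "and" binding tighter than "or"; the sketch below shows the same logic with explicit grouping and hypothetical inputs, assuming an inconclusive run maps to the 'neutral' conclusion as elsewhere in this patch.

    # same expression as in publish_test_results.py, with the implicit grouping made explicit
    def action_fail_required(conclusion, action_fail, action_fail_on_inconclusive):
        return (action_fail and conclusion == 'failure') or \
               (action_fail_on_inconclusive and conclusion == 'neutral')

    action_fail_required('neutral', action_fail=False, action_fail_on_inconclusive=True)   # True
    action_fail_required('failure', action_fail=False, action_fail_on_inconclusive=True)   # False
    action_fail_required('failure', action_fail=True, action_fail_on_inconclusive=False)   # True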
check_var_condition( + settings.test_file_prefix is None or any([settings.test_file_prefix.startswith(sign) for sign in ['-', '+']]), + f"TEST_FILE_PREFIX is optional, but when given, it must start with '-' or '+': {settings.test_file_prefix}" + ) check_var(settings.comment_mode, 'COMMENT_MODE', 'Comment mode', comment_modes) check_var(settings.pull_request_build, 'PULL_REQUEST_BUILD', 'Pull Request build', pull_request_build_modes) check_var(suite_logs_mode, 'REPORT_SUITE_LOGS', 'Report suite logs mode', available_report_suite_logs) diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt index b3d52c3e..dfc0768a 100644 --- a/python/requirements-direct.txt +++ b/python/requirements-direct.txt @@ -1,7 +1,5 @@ humanize==3.14.0 junitparser==3.1.0 -lxml==4.9.1 -psutil==5.9.5 -PyGithub==2.0.1rc0 -requests==2.31.0 -urllib3==1.26.16 +lxml==4.9.3 +psutil==5.9.7 +PyGithub==2.1.1 diff --git a/python/requirements.txt b/python/requirements.txt index e9d792a9..76b9b240 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,19 +1,25 @@ humanize==3.14.0 junitparser==3.1.0 future==0.18.3 -lxml==4.9.1 -psutil==5.9.5 -PyGithub==2.0.1rc0 +lxml==4.9.3 +psutil==5.9.7 +PyGithub==2.1.1 Deprecated==1.2.14 - wrapt==1.15.0 + wrapt==1.16.0 PyJWT==2.8.0 PyNaCl==1.5.0 + # latest version that support Python 3.7 cffi==1.15.1 pycparser==2.21 python-dateutil==2.8.2 six==1.16.0 requests==2.31.0 - certifi==2023.7.22 - charset-normalizer==3.2.0 - idna==3.4 - urllib3==1.26.16 + certifi==2023.11.17 + charset-normalizer==3.3.2 + idna==3.6 + # latest version that support Python 3.7 + urllib3==2.0.7 + # latest version that support Python 3.7 + typing_extensions==4.7.1 + # latest version that support Python 3.7 + urllib3==2.0.7 diff --git a/python/test/files/dart/json/tests.annotations b/python/test/files/dart/json/tests.annotations index dc5fdfa6..bcb21cb3 100644 --- a/python/test/files/dart/json/tests.annotations +++ b/python/test/files/dart/json/tests.annotations @@ -7,21 +7,10 @@ 'output': { 'title': '2 errors, 1 fail, 1 skipped, 16 pass in 0s', 'summary': - '20 tests\u2002\u2003\u200316 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '4 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '20 tests\u2002\u2003\u200316 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20074 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20071 :x:\u2003\u20032 :fire:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REiy4TYkVDpTHqKrp74w+au3nvY97DNzhXwyfWNYwbBzbAQLA4FBa0ImwJabB+6j' 'PMxknpP8diDrhTK4pNwFmJFVGjz5BBp3LR31UwitIL/MsF/tekvi6wBOliZhf8/QAMgVR' diff --git a/python/test/files/junit-xml/bazel/suite-logs.annotations 
b/python/test/files/junit-xml/bazel/suite-logs.annotations index ced06018..61324679 100644 --- a/python/test/files/junit-xml/bazel/suite-logs.annotations +++ b/python/test/files/junit-xml/bazel/suite-logs.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLbT1M4QgxIsC5g4q498liHp0OzvJnNLBbknOYhqEpAzpgyWjThBDwbFgEelVdSvKxn' 'CpaIOjO5yGvTssYsQWwRyITZ57+K9VZrHKvGWi95AKtCVo1fK6AX55nzvdAAAA\n', diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations index d045baf9..e032cabf 100644 --- a/python/test/files/junit-xml/jest/jest-junit.annotations +++ b/python/test/files/junit-xml/jest/jest-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations index 8107e6c7..900001bf 100644 --- a/python/test/files/junit-xml/junit.multiresult.annotations +++ b/python/test/files/junit-xml/junit.multiresult.annotations @@ -7,28 +7,10 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 1s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20021s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 
'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n4 runs\u2006\u2003' - '-2 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20033 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20032 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20031 suites\u2004\u2003\u20021s :stopwatch:\n4 tests\u2003' + '1 :white_check_mark:\u20031 :zzz:\u20031 :x:\u20031 :fire:\n4 runs\u200a\u2003' + '-2 :white_check_mark:\u20033 :zzz:\u20032 :x:\u20031 :fire:\n\n' + 'Results for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KotfBTeRlCEONGPmYXKuPdlQhEujdvkrn4BkYTX9jQMU4RQoU1ogzgXcZXhKTmsgVFpf' '5S0AFnc2wSTHNoRI/5wehKL82S68d6fLmpcK5V/48pby2EF/JitEt+P6y+BE/eAAAA\n', diff --git a/python/test/files/junit-xml/minimal-attributes.annotations b/python/test/files/junit-xml/minimal-attributes.annotations index d61a33f6..22e1f1f9 100644 --- a/python/test/files/junit-xml/minimal-attributes.annotations +++ b/python/test/files/junit-xml/minimal-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations 
b/python/test/files/junit-xml/mocha/latex-utensils.annotations index aa6fca0f..fe8ae8dc 100644 --- a/python/test/files/junit-xml/mocha/latex-utensils.annotations +++ b/python/test/files/junit-xml/mocha/latex-utensils.annotations @@ -7,25 +7,10 @@ 'output': { 'title': 'All 101 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 ' - 'suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n101 ' - 'tests\u2003101 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n109 runs\u2006\u2003' - '109 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '\u2007\u20071 files\u2004\u2003\u2007\u20071 suites\u2004\u2003\u2002' + '0s :stopwatch:\n101 tests\u2003101 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n109 runs\u200a\u2003109 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv' '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK' 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n', diff --git a/python/test/files/junit-xml/no-attributes.annotations b/python/test/files/junit-xml/no-attributes.annotations index b263b6d0..b06aa7a9 100644 --- a/python/test/files/junit-xml/no-attributes.annotations +++ b/python/test/files/junit-xml/no-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' 
'0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations index c11559f9..0f39fd65 100644 --- a/python/test/files/junit-xml/no-cases-but-tests.annotations +++ b/python/test/files/junit-xml/no-cases-but-tests.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 skipped, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MS+bmCo7V8ol1DePWg/th8SgcGE3YEtLhvmvMe7' 'ZeShJDETtcJPpfKAFHqkWxIhpMQfQ6975Z5yKXWuAqFrhuSXOe4AjSYnYT/HkBNCXSZd0' diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations index 95c7030e..8ce031e6 100644 --- a/python/test/files/junit-xml/no-cases.annotations +++ b/python/test/files/junit-xml/no-cases.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/junit-xml/non-junit.annotations b/python/test/files/junit-xml/non-junit.annotations index c3c2e082..28635d0b 100644 --- a/python/test/files/junit-xml/non-junit.annotations +++ b/python/test/files/junit-xml/non-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 parse errors', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 
'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n0 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n1 errors\n\nResults ' - 'for commit commit s.\n\n' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n0 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n1 errors\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBBFf' '0Vm3aK2/UyIKQ35iBldRf+emNC4u+dcODc49JZhVcukgAvmBnOFvZDOmGLHemSWe+NizC' 'hOvAbhNPpBWKJE3VCJLKbMffzXGotYY9kyKQTMFfpSfGh4XnRU87HdAAAA\n', diff --git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations index dcda56f5..969db188 100644 --- a/python/test/files/junit-xml/pytest/junit.fail.annotations +++ b/python/test/files/junit-xml/pytest/junit.fail.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 3 pass in 2s', 'summary': - '5 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '5 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfzExssYghA3IpgFKuPdXVEUuzez2dm5BqM8H1hTMe4jhBemiCKAs4QtIR3CderzHn' '2UkkT3iQW25/kWWoD5CYXokExNBqPNvWuWuZu/WuIilrhsSbeuEAiexfws+HECiWEEJ90' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations index efe1f950..d8c15b5c 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 10 tests pass, 4 skipped in 1m 12s', 'summary': - '14 tests\u2002\u2003\u200310 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 12s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20044 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - 
'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '14 tests\u2002\u2003\u200310 :white_check_mark:\u2003\u20031m 12s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20074 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd' 'EFKaOpi1lhJ1NeZgGaRPlQiBazwbC9xXj/grcovcSfXOJvTVpjwBPki7lF8PMCyjZFT+I' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations index 85307df2..19251c83 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 20 tests pass, 2 skipped in 10m 27s', 'summary': - '22 tests\u2002\u2003\u200320 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '10m 27s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '22 tests\u2002\u2003\u200320 :white_check_mark:\u2003\u200310m 27s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC' '8MLigVTV3MCns0WcwSNhLlY0K0+BgMJhfj/QveovQSf3KJvzVltQZP8FzMLZKfF82Ojyn' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations index 31c9b6f5..ed382cfd 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 11m 10s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '11m 10s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u200311m 10s ' + 
':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN' 'cP9MZJ+TMz7GTSf68ELJkYETVGg25LRX9nwVu8vcCfXOBvTep1BUsQL2Ymwc8LUe9HxOM' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations index 263648dc..d122405b 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 80 tests pass, 17 skipped in 3m 25s', 'summary': - '97 tests\u2002\u2003\u200380 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3m 25s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200317 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '97 tests\u2002\u2003\u200380 :white_check_mark:\u2003\u20033m 25s ' + ':stopwatch:\n\u20071 suites\u2003\u200317 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv' 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations index e0fd1fc4..951ee896 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 12 tests pass, 12 skipped in 1m 9s', 'summary': - '24 tests\u2002\u2003\u200312 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 9s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200312 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '24 tests\u2002\u2003\u200312 :white_check_mark:\u2003\u20031m 9s ' + ':stopwatch:\n\u20071 suites\u2003\u200312 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o' 
'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations index 7d09ff5a..bd91c7c4 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 3 tests pass in 15s', 'summary': - '3 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '15s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u200315s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttAYGy9jCGLciGAWqIx3d/EL3Zt5yewwodEeetFUAnzE8MEYSQZ0NmHHzE9IX/vuwU' 'elSrHgxqL+xCTRFEITOXoMRfv20sxzN/+1i7PYxXlLuXXFwPAs4WcJxwk6KM9l3gAAAA=' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations index 51dd8a65..e217dc05 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 96 tests pass, 1 skipped in 3m 39s', 'summary': - '97 tests\u2002\u2003\u200396 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3m 39s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '97 tests\u2002\u2003\u200396 :white_check_mark:\u2003\u20033m 39s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf' 'vCYL2UwTSfmWGPgUdoAQuJMgmFaDAa9Fsqhv0LPuLr3Zzlbs5r0qwrOIK4mJ0EPy/3HdY' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations index dfefec24..88026b3c 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations @@ -7,19 +7,9 
@@ 'output': { 'title': 'All 24 tests pass in 2m 4s', 'summary': - '24 tests\u2002\u2003\u200324 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '24 tests\u2002\u2003\u200324 :white_check_mark:\u2003\u20032m 4s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr' 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK' 'xX6mcWPNjUUUwS10JoIkOfIb/HYthF8BWp93CWezivKbNt6Bi+Jews4boBWo1x8eMAAAA' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations index 70a4071b..309d9557 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 45s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 45s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 45s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR' 'hC0jqb5jULbGRqFkbBSqJkMSF6fAwmx8W8f8FbvL2LP7mLvzXtrYVI8CwRZiWPEwEjqVj' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations index cf3cc69e..8f977394 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 52s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 52s ' - 
'[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 52s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr' '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf' '2CcsGYaKpkJtjI8L8aNMwkchY9osXHYFi5GO9f8Bapd/End/G3ZuyygCd4LuFGLY8TfGY' diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations index 6c5b09ba..78adb578 100644 --- a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 2s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igSz8TYkZLlrHqKfr3VlOz25t5MBdfQCvLR9Y1jFsPrsDsUTgwB2FPSIcL15D3ZL' '2Uf7HBSaItYhGgf0IhGkwG/ZF7Yda5l79a5CoWuW5Js+/gCNJidhX8fgDdy7133QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations index 94a8b872..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-in-testsuite.annotations +++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 
'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations index 94a8b872..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-root.annotations +++ b/python/test/files/junit-xml/testsuite-root.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n4 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations index 981189ca..6bc432e1 100644 --- a/python/test/files/junit-xml/tst/disabled.annotations +++ b/python/test/files/junit-xml/tst/disabled.annotations @@ -7,29 +7,10 @@ 'output': { 'title': '1 errors, 19 fail, 5 skipped, 6 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20042 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n31 ' - 'tests\u2003\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20035 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200319 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n31 runs\u2006\u2003' - '11 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols 
"skipped / disabled tests")\u200319 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '\u20071 files\u2004\u2003\u20072 suites\u2004\u2003\u20020s ' + ':stopwatch:\n31 tests\u2003\u20076 :white_check_mark:\u20035 :zzz:\u2003' + '19 :x:\u20031 :fire:\n31 runs\u200a\u200311 :white_check_mark:\u2003' + '0 :zzz:\u200319 :x:\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NQQqAIBBFr' 'yKuW2RRUJcJsaIhyxh1Fd29sdLczXsf3px8Bj1Z3jNRMG49uAcqgtGjdGB2wpKQBhemWk' 'QYrFeKTPuLFQ4STRKzBB3aXTITosHvHfo9FcMdg+IXb7CMnPcekeeU2TZwBN/F7CL5dQP' diff --git a/python/test/files/junit-xml/unsupported-unicode.annotations b/python/test/files/junit-xml/unsupported-unicode.annotations index 562a3976..5953a144 100644 --- a/python/test/files/junit-xml/unsupported-unicode.annotations +++ b/python/test/files/junit-xml/unsupported-unicode.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '2 errors, 2 fail, 2 skipped, 1 pass in 8s', 'summary': - '7 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '8s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20032 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '7 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20038s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '2 :x:\u2003\u20032 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYXXTReBlDUGIjgikwGe9uQVDc/ntt3skV6MXxkbUN4y6Af2EOKDxYQzgQ0sHHU1/25I' 'KU+TeLDQ4S3SuUAP0TC6LFbDCY0ouzzj381RJXscR1S9p9B0+QF3Or4NcNSlhwMN0AAAA' diff --git a/python/test/files/junit-xml/with-xml-entities.annotations b/python/test/files/junit-xml/with-xml-entities.annotations index a887a097..bc1a93f4 100644 --- a/python/test/files/junit-xml/with-xml-entities.annotations +++ b/python/test/files/junit-xml/with-xml-entities.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 2 skipped in 0s', 'summary': - '4 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols 
"failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ5AMBBFr' '9J0bYFYuYw0RUyUyky7Enc3qmV2/72fvFPP4CbSvWoqpSlC+GCMaAL4nbFm5CM8V1f2QN' 'FaeQ60wsGi/cRswOXaKyZEj9lg3EvvmTL38l9LLGKJZcv6bYPAkJeixejrBpBXIV3dAAA' diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations index 4fa7b5d3..7aeb7110 100644 --- a/python/test/files/junit-xml/xunit/xunit.annotations +++ b/python/test/files/junit-xml/xunit/xunit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/mocha/tests.annotations b/python/test/files/mocha/tests.annotations index 607a0c8e..9d7ae538 100644 --- a/python/test/files/mocha/tests.annotations +++ b/python/test/files/mocha/tests.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 2 pass in 12s', 'summary': - '5 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '12s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u200312s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' 
'0KobSSx8TKGoMaNfMwClfHu8hXs3sxm56Y7yM3SmYwDodaD+2D1yB0YHZEFDhcXb1Pdi/' 'VCBMGaOOEq31nsHORPbIgGi0Gvay/OPpe51RJ3scR9SxilwAUoi9iD0+cFI3viF94AAAA' diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations index 20bedd9c..cf20a207 100644 --- a/python/test/files/nunit/mstest/clicketyclackety.annotations +++ b/python/test/files/nunit/mstest/clicketyclackety.annotations @@ -7,25 +7,10 @@ 'output': { 'title': '10 fail, 12 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20048 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n22 ' - 'tests\u200312 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200310 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n23 runs\u2006\u2003' - '13 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u200310 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '\u20071 files\u2004\u2003\u20078 suites\u2004\u2003\u20020s ' + ':stopwatch:\n22 tests\u200312 :white_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n23 runs\u200a\u200313 :white_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv' '9g9kHr+FklB1z1ft4UmDgpyYroMG8wnEk55Ps3lqAIE9e+FNQ67awFYsiX8LuSzwvzas/' diff --git a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations index 5080096e..e05c2074 100644 --- a/python/test/files/nunit/mstest/pickles.annotations +++ b/python/test/files/nunit/mstest/pickles.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 
'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations index 717924b9..dec20d71 100644 --- a/python/test/files/nunit/mstest/timewarpinc.annotations +++ b/python/test/files/nunit/mstest/timewarpinc.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 2s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv' '0I4e1CPfsYQhNiIYAqcjH+3IgjednbbObkGozyf2NAx7iOED5aIIoCzhCMhDaFMKc8+Sk' 'lFX4sNjl+hBZjfi0J0mE8w2uJ7Yqt7udoSN7LErUu6fYdAkBPzq+DXDXGDl7HdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations index de1d72e3..8b60e0ad 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 8 skipped, 18 pass in 0s', 'summary': - '28 tests\u2002\u2003\u200318 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n11 ' - 'suites\u2003\u2003\u205f\u20048 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/' - 'blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '28 tests\u2002\u2003\u200318 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n11 suites\u2003\u2003\u20078 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHdTJeBlDUGKjiGlhMt7dIqi49b3fvEMaWCeSvWgqISmATxBpDKg8uI25ZuTFx63tHh' 'goaB2/C7PAzuYTRsGa60lMiA6zwbC9xXj/gkl8vZuL3M1lTTtrwTPkS9Cs5HkBSPFg+uI' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations index 2d1f8db5..f8b19140 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations +++ 
b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations @@ -7,25 +7,10 @@ 'output': { 'title': 'All 183 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n183 ' - 'tests\u2003183 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n218 runs\u2006\u2003' - '218 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '\u2007\u20071 files\u2004\u2003102 suites\u2004\u2003\u20020s ' + ':stopwatch:\n183 tests\u2003183 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n218 runs\u200a\u2003218 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr' '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN' 'vmod4Hrf9qgi3/6K2C+SMMosNkMKxXs67aBE8yN28xchaMnPe0WxYghnQJPyp5nNtosNP' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations index 33356b44..f678db2a 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 4m 24s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4m 24s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n13 ' - 'suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u20034m 24s ' + ':stopwatch:\n13 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV' 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations index aaacf0d5..ca0c972e 
100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfx0XoYQ1LhRwCxQGe/uykfo5s1M3s03OFfHZzZ0jLsA/ocloPRgDWFPSIP/pqlk4Y' 'JSVIy1OOBq32KTcGZbKlZEi/mCwRTfF1td4mqL3Mgity5ltQZPkBNzu+TPC/n9SCLdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations index 0bfffef3..0a746420 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 9 pass in 1s', 'summary': - '10 tests\u2002\u2003\u20039 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '10 tests\u2002\u2003\u20039 :white_check_mark:\u2003\u20031s ' + ':stopwatch:\n\u20071 suites\u2003\u20030 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv' '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys' 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations index 45560b17..e9665178 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 1 tests pass, 2 skipped in 0s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - 
'[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20032 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/Cz8zKGIMRGBdPCynh3KyKymzfTvlNa2AzJUXSNkBQhFJgjqgDeMbaMPIRnGr48Ud' 'Q63+ZihYOLvhRWwVa/TwbRY24wus/3xFr38m9LXMkS1y7t9x0CQ06CFiWvGx5uWF7dAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations index 0c8261f0..dd42640d 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 6 tests pass in 35s', 'summary': - '6 tests\u2002\u2003\u20036 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '35s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20036 :white_check_mark:\u2003\u200335s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dyiiDv1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawjbjpgef3992q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMC7eTeEN4AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations index fd92a858..4093bcc2 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations @@ -7,22 +7,9 @@ 'output': { 'title': 'All 1 tests pass, 1 skipped in 6s', 'summary': - '1 files\u2004\u20032 suites\u2004\u2003\u20026s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n2 runs\u2006\u20032 ' - 
'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '1 files\u2004\u20032 suites\u2004\u2003\u20026s :stopwatch:\n2 tests\u2003' + '1 :white_check_mark:\u20031 :zzz:\u20030 :x:\n2 runs\u200a\u20032 ' + ':white_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations index 0bbe872e..bacfefec 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations @@ -7,26 +7,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' @@ -1932,26 +1916,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - 
'140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' @@ -3671,26 +3639,10 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n146 ' - 'tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations index 98d07c08..c4391213 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue48478.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 3 tests pass in 17s', 'summary': - '3 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '17s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all 
tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u200317s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfwUJl7GENS4EcEsUBnv7oKi0L2Zl8zJF1Cz5QNrKsatBxehJZg8CgdGh68npseFr0' 't7tF7KUmxwkKg/sQhQhZgRDb4GvU69MPPcw38tchaLnLek2XdwBO9idhX8ugG5zrfD3gA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations index b767ee76..c336d35c 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue50162.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLfwkFl7GEIS4kY9ZoDLe3RU1YDdvZvIOrsGowCfWNYyHBDFDT7AkFBG8I2wJaYj3NH' '55DklKKoZSbLDX71kLML+HQvT4XjC5z3fHWvdwsWWuZJlrl/TWQiR4Ewur4OcFmZnWM90' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations index d3aaa892..fa07d95f 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue5674.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 6 pass in 0s', 'summary': - '9 tests\u2002\u2003\u20036 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n3 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '9 tests\u2002\u2003\u20036 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n3 suites\u2003\u20030 :zzz:\n1 
files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MywqAIBREf' '0VctyiCoH5GxIwu+YirrqJ/72ZK7ubMDOfiGxgd+MKGjvGQIGYYCdaEMoJ3hD0hDfGd5p' 'pFSEpRMf3FAWf7FpsEU2xfoRE9lgsmV31vbHUf/7bMjSxz61LeWogEJbGwS34/WLAikt0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations index b4594abc..2d34ba36 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue6353.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 3s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20033s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHTRuXsYQlNgoYlqYjHe3KmrZ/ntt3q4dLCPpTjWV0pQgfjAkNBHCytgy8iGS3D0la/' 'NvFjNshXAGlkKMiAHZ1GwwrW/vmjL38F+7WcRuli0bvIfIkJeiyejjBNBleN/dAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations index a770afa3..2c8cf7c5 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-multinamespace.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 4 tests pass in 0s', 'summary': - '4 tests\u2002\u2003\u20034 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '4 tests\u2002\u2003\u20034 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdRYeRlCUOJGPmaBynh3F8SI3byZyTu5BrMGPrOhYzwkiAVGgiWhjOAdYU9IQ8zT9G' 'YRklL/YoejfQstwfyKFdFjbTC515djq3v4sxVuZIVbl/LWQiSoiYVN8usGDjGDkd0AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations index 10603db6..4ca4fd85 100644 --- 
a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-file.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '2 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations index a5323ee4..b5614f87 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-sec1752-https.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 pass in 0s', 'summary': - '2 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '2 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdTSyxCCEDfyMQtUxrsLCIrdm93JnFSBlp4uZBoI9RHCC2tEHsDZhGPC9Aj5NbfNfB' 'SiuvWww9HbTHHQP0MiOqwKRtt6efa5h79a4S5WuG8JZwyEBHURv3F63ZlK7bXdAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations index 3bc0ccac..bf971b3b 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-simple.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 
'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations index 783dc9ea..2e752f71 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n2 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations index 678c8cad..fcdfc2a4 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnitUnicode.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 
files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/trx/mstest/pickles.annotations b/python/test/files/trx/mstest/pickles.annotations index c2c19739..684ca48f 100644 --- a/python/test/files/trx/mstest/pickles.annotations +++ b/python/test/files/trx/mstest/pickles.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLTRaeRlCUONGAbNAZby7Kx+hmzczeTff4Fwdn9nQMe4C+B+WgNKDNYQ9IQ3+m6aShQ' 'tKUTHW4oCrfYtNwpltqVgRLeYLBlN8X2x1iastciOL3LqU1Ro8QU7M7ZI/L5ec2abdAAA' diff --git a/python/test/files/trx/nunit/FluentValidation.Tests.annotations b/python/test/files/trx/nunit/FluentValidation.Tests.annotations index 5afe0ced..025b4e12 100644 --- a/python/test/files/trx/nunit/FluentValidation.Tests.annotations +++ b/python/test/files/trx/nunit/FluentValidation.Tests.annotations @@ -7,19 +7,10 @@ 'output': { 'title': 'All 803 tests pass, 1 skipped in 3s', 'summary': - '804 tests\u2002\u2003\u2003803 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004\u205f\u2004' - '1 suites\u2003\u2003\u205f\u2004\u205f\u20041 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u2004\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '804 tests\u2002\u2003\u2003803 :white_check_mark:\u2003\u20033s ' + ':stopwatch:\n\u2007\u20071 suites\u2003\u2003\u2007\u20071 :zzz:\n\u2007\u2007' + '1 files\u2004\u2002\u2003\u2003\u2007\u20070 :x:\n\nResults for ' + 'commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dzDsEP1MiBUtacaqp+jf28yybm/mwex8AjN63rG6YtxHCC8MEVUAtxJKQjrCdbWiea' 'j3UeukZFELbDlwi0mBISFeMSI6zAbjWpoX/JO3KcXEn2Dib087ayEQ5MX8rPhxArdpBif' diff --git a/python/test/files/trx/nunit/NUnit-net461-sample.annotations b/python/test/files/trx/nunit/NUnit-net461-sample.annotations index 
54e3926a..700f6de8 100644 --- a/python/test/files/trx/nunit/NUnit-net461-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-net461-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations index ec15c572..9543c8dc 100644 --- a/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/nunit/NUnit-netcoreapp3.1-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/nunit/SilentNotes.annotations b/python/test/files/trx/nunit/SilentNotes.annotations index b4b7860d..46cef341 100644 --- a/python/test/files/trx/nunit/SilentNotes.annotations +++ b/python/test/files/trx/nunit/SilentNotes.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 67 tests pass, 12 skipped in 0s', 'summary': - '79 tests\u2002\u2003\u200367 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols 
"duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200312 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '79 tests\u2002\u2003\u200367 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20071 suites\u2003\u200312 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCopcxBCVuRDELVMa7u/hB7HbmZWfnGszoeMeqgnEXwCcYAkoPdiUsCWnwcRLtC7' '0LSpFpxGdm2OJ7nYyWYPJCPyJafAyGNSXj/SveIgteIutdnOeUXRbwBM/F3CT5cQKN/0L' diff --git a/python/test/files/trx/xunit/dotnet-trx.annotations b/python/test/files/trx/xunit/dotnet-trx.annotations index 32397e73..72d086c9 100644 --- a/python/test/files/trx/xunit/dotnet-trx.annotations +++ b/python/test/files/trx/xunit/dotnet-trx.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '5 fail, 1 skipped, 5 pass in 0s', 'summary': - '11 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u20035 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '11 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20071 suites\u2003\u20031 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003' + '5 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLaSw8TKGIMaNfMwClfHuroCK3byZ3XfwBYwOfGSiYzwkiC/MCWUE7wh7QhpinsQDU0' 'hKUTN8xQZ7/S7FIsH8LjSix2rE5F7hnVtf4U+XubFlbmXKWwuRoCYWVsnPC2b3Tg/fAAA' diff --git a/python/test/files/trx/xunit/xUnit-net461-sample.annotations b/python/test/files/trx/xunit/xUnit-net461-sample.annotations index 3a4d596b..daed1553 100644 --- a/python/test/files/trx/xunit/xUnit-net461-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-net461-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 
suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations index f200c1a7..62c2df45 100644 --- a/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations +++ b/python/test/files/trx/xunit/xUnit-netcoreapp3.1-sample.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 2 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '6 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '3 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MZufmCo7V8ol1DePWg/th8SgcGE3YEtLBfacx79' 'l6KUn0RexwpecolICDxPCLFdFgCqLXuffNOhe51AJXscB1S5rzBEeQFrOb4M8LVJ41VN0' diff --git a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations index 8982261b..46f8d041 100644 --- a/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations +++ b/python/test/files/trx/yami_YAMILEX 2015-10-24 04_18_59.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '2 fail, 21 skipped, 2 pass in 26s', 'summary': - '25 tests\u2002\u2003\u2003\u205f\u20042 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '26s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n\u205f\u2004' - '1 suites\u2003\u200321 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20042 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '25 tests\u2002\u2003\u2003\u20072 :white_check_mark:\u2003\u200326s ' + ':stopwatch:\n\u20071 suites\u2003\u200321 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '2 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLcRECy9jCErciGAWqIx3d/GD2O3M28zONZjJ856JinEfIWQYI8oAzhI2HTEtIW1N+8' 'Lgo1LJfGKBLQmRjZZgfi8TokMyNRmMNhfTXQZvLnqXKHIXlzXl1hUCwXMxP0t+nB5bCu/' diff --git a/python/test/files/xunit/mstest/fixie.annotations 
b/python/test/files/xunit/mstest/fixie.annotations index b7bf755f..eb6a0589 100644 --- a/python/test/files/xunit/mstest/fixie.annotations +++ b/python/test/files/xunit/mstest/fixie.annotations @@ -7,22 +7,9 @@ 'output': { 'title': '3 fail, 1 skipped, 1 pass in 8s', 'summary': - '1 files\u2004\u20032 suites\u2004\u2003\u20028s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n5 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n7 runs\u2006\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20033 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '1 files\u2004\u20032 suites\u2004\u2003\u20028s :stopwatch:\n5 tests\u2003' + '1 :white_check_mark:\u20031 :zzz:\u20033 :x:\n7 runs\u200a\u20033 ' + ':white_check_mark:\u20031 :zzz:\u20033 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QSRZcJsaKh0hh1Fd29Scxs998b5p18hm2yvGdVwbj14ALUBKNH6cBowo6QDu45Ne' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations index b5213289..00b40d97 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase1.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XsYQhLiRj1mgMt5dQFDo5s1M3kUlKOHoQqaBUBfAf7AFZB6siThGjINP01zz6g' 'Ln5VuKA86ukAxUVwhEi0WIwVRfiq3u5d+WuZFlbl3cag0+QknE7YzeD2gV0DndAAAA\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations index 45804600..449c69a4 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations +++ 
b/python/test/files/xunit/mstest/jenkinsci/testcase2.annotations @@ -7,22 +7,9 @@ 'output': { 'title': 'All 1 tests pass in 0s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'tests\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n3 runs\u2006\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n1 tests\u2003' + '1 :white_check_mark:\u20030 :zzz:\u20030 :x:\n3 runs\u200a\u20031 ' + ':white_check_mark:\u20031 :zzz:\u20031 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLTR2XoYQhLiRj1mWynh3UZBgN29m8k5uwOrIFzYNjMcE1GBNKAmCzzhmzAN905tFTE' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations index 30727b7f..27b9ab3d 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase3.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 1 pass in 4m 48s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4m 48s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20031 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20034m 48s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfw0xMsYgho3opgFKuPdXRAUuzezmzn5DHqyvGdNxbj14F4YPUoHZidshSBBJxeOXd' '6D9Uql7yRWOH5ilqB/YkI0SKYmg37PvTDL3MNfLXIRi1y2lNk2cARpMbtIft14m53n3wA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations index 95c7030e..8ce031e6 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase4.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'No tests found', 'summary': - '0 
tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations index 25934f35..806cceb4 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase5.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 1m 32s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 32s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20031m 32s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igQ/1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawj7lpgef39d2q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMCmAJ3I94AAAA' diff --git a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations index febae910..f0aa4cd5 100644 --- a/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations +++ b/python/test/files/xunit/mstest/jenkinsci/testcase6.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n1 ' - 'suites\u2003\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - 
'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' - 'commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSw6AIAxEr' '0JYu9CtlzEEITbyMS2sjHcXERR28zrTd3INRhGf2TQwThHCB2tEEcC7hGPCVIRa5bxQlL' 'ItF9rh6A5agOleFKLHMsHoqu+Jre7l35a5kWVuXdJbCyFBSYw2wa8bniF3vN0AAAA=\n', diff --git a/python/test/files/xunit/mstest/pickles.annotations b/python/test/files/xunit/mstest/pickles.annotations index 1a9136aa..caf7ff2a 100644 --- a/python/test/files/xunit/mstest/pickles.annotations +++ b/python/test/files/xunit/mstest/pickles.annotations @@ -7,22 +7,9 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "duration of all tests")\n3 ' - 'tests\u20032 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n4 runs\u2006\u20033 ' - '[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-re' - 'sult-action/blob/VERSION/README.md#the-symbols "passed tests")\u20030 ' - '[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/b' - 'lob/VERSION/README.md#the-symbols "skipped / disabled tests")\u20031 ' - '[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blo' - 'b/VERSION/README.md#the-symbols "failed tests")\n\nResults for commit ' + '1 files\u2004\u20031 suites\u2004\u2003\u20020s :stopwatch:\n3 tests\u2003' + '2 :white_check_mark:\u20030 :zzz:\u20031 :x:\n4 runs\u200a\u20033 ' + ':white_check_mark:\u20030 :zzz:\u20031 :x:\n\nResults for commit ' 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLVSsvIwhqHEjH7NAZby7qwLSzZuZvJOvoBfPR9Y1jPsIocAcUQZwlrAlpCE8k8h58l' diff --git a/python/test/requirements.txt b/python/test/requirements.txt index e4db7ef0..a5090624 100644 --- a/python/test/requirements.txt +++ b/python/test/requirements.txt @@ -4,4 +4,3 @@ prettyprinter pytest pyyaml>=5.1 requests -urllib3<2.0.0 diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index 785fb50a..26992c0c 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -1,8 +1,8 @@ import io import json -import logging import os import pathlib +import platform import re import sys import tempfile @@ -13,8 +13,8 @@ from packaging.version import Version from publish import __version__, pull_request_build_mode_merge, fail_on_mode_failures, fail_on_mode_errors, \ - fail_on_mode_nothing, comment_modes, comment_mode_always, comment_mode_off, \ - report_suite_out_log, report_suite_err_log, report_suite_logs, report_no_suite_logs, default_report_suite_logs, \ + fail_on_mode_nothing, comment_modes, comment_mode_always, report_suite_out_log, report_suite_err_log, \ + report_suite_logs, report_no_suite_logs, default_report_suite_logs, \ default_annotations, all_tests_list, 
skipped_tests_list, none_annotations, \ pull_request_build_modes, punctuation_space from publish.github_action import GithubAction @@ -180,9 +180,11 @@ def get_settings(token='token', xunit_files_glob='xunit-files', trx_files_glob='trx-files', time_factor=1.0, + test_file_prefix=None, check_name='check name', comment_title='title', comment_mode=comment_mode_always, + check_run=True, job_summary=True, compare_earlier=True, test_changes_limit=10, @@ -228,9 +230,11 @@ def get_settings(token='token', xunit_files_glob=xunit_files_glob, trx_files_glob=trx_files_glob, time_factor=time_factor, + test_file_prefix=test_file_prefix, check_name=check_name, comment_title=comment_title, comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=compare_earlier, pull_request_build=pull_request_build, @@ -245,7 +249,7 @@ def get_settings(token='token', seconds_between_github_reads=seconds_between_github_reads, seconds_between_github_writes=seconds_between_github_writes, secondary_rate_limit_wait_seconds=secondary_rate_limit_wait_seconds, - search_pull_requests=search_pull_requests + search_pull_requests=search_pull_requests, ) def test_get_settings(self): @@ -353,6 +357,16 @@ def test_get_settings_time_unit(self): self.assertIn('TIME_UNIT minutes is not supported. It is optional, ' 'but when given must be one of these values: seconds, milliseconds', re.exception.args) + def test_get_settings_test_file_prefix(self): + self.do_test_get_settings(TEST_FILE_PREFIX=None, expected=self.get_settings(test_file_prefix=None)) + self.do_test_get_settings(TEST_FILE_PREFIX='', expected=self.get_settings(test_file_prefix=None)) + self.do_test_get_settings(TEST_FILE_PREFIX='+src/', expected=self.get_settings(test_file_prefix='+src/')) + self.do_test_get_settings(TEST_FILE_PREFIX='-./', expected=self.get_settings(test_file_prefix='-./')) + + with self.assertRaises(RuntimeError) as re: + self.do_test_get_settings(TEST_FILE_PREFIX='path/', expected=None) + self.assertIn("TEST_FILE_PREFIX is optional, but when given, it must start with '-' or '+': path/", re.exception.args) + def test_get_settings_commit(self): event = {'pull_request': {'head': {'sha': 'sha2'}}} self.do_test_get_settings(INPUT_COMMIT='sha', GITHUB_EVENT_NAME='pull_request', event=event, GITHUB_SHA='default', expected=self.get_settings(commit='sha', event=event, event_name='pull_request', is_fork=True)) @@ -444,6 +458,15 @@ def test_get_settings_compare_to_earlier_commit(self): self.do_test_get_settings(COMPARE_TO_EARLIER_COMMIT='foo', expected=self.get_settings(compare_earlier=True), warning=warning, exception=RuntimeError) self.do_test_get_settings(COMPARE_TO_EARLIER_COMMIT=None, expected=self.get_settings(compare_earlier=True)) + def test_get_settings_check_run(self): + warning = 'Option check_run has to be boolean, so either "true" or "false": foo' + self.do_test_get_settings(CHECK_RUN='false', expected=self.get_settings(check_run=False)) + self.do_test_get_settings(CHECK_RUN='False', expected=self.get_settings(check_run=False)) + self.do_test_get_settings(CHECK_RUN='true', expected=self.get_settings(check_run=True)) + self.do_test_get_settings(CHECK_RUN='True', expected=self.get_settings(check_run=True)) + self.do_test_get_settings(CHECK_RUN='foo', expected=self.get_settings(check_run=True), warning=warning, exception=RuntimeError) + self.do_test_get_settings(CHECK_RUN=None, expected=self.get_settings(check_run=True)) + def test_get_settings_job_summary(self): warning = 'Option job_summary has to be boolean, so either 
"true" or "false": foo' self.do_test_get_settings(JOB_SUMMARY='false', expected=self.get_settings(job_summary=False)) @@ -783,7 +806,7 @@ def test_get_files_single(self): with open(filename, mode='w'): pass - files = get_files('file1.txt') + files, _ = get_files('file1.txt') self.assertEqual(['file1.txt'], sorted(files)) def test_get_files_multi(self): @@ -796,7 +819,7 @@ def test_get_files_multi(self): with open(filename, mode='w'): pass - files = get_files(f'file1.txt{sep}file2.txt') + files, _ = get_files(f'file1.txt{sep}file2.txt') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_single_wildcard(self): @@ -809,7 +832,7 @@ def test_get_files_single_wildcard(self): with open(filename, mode='w'): pass - files = get_files(wildcard) + files, _ = get_files(wildcard) self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_multi_wildcard(self): @@ -822,8 +845,9 @@ def test_get_files_multi_wildcard(self): with open(filename, mode='w'): pass - files = get_files(f'*1.txt{sep}*3.bin') + files, absolute = get_files(f'*1.txt{sep}*3.bin') self.assertEqual(['file1.txt', 'file3.bin'], sorted(files)) + self.assertFalse(absolute) def test_get_files_subdir_and_wildcard(self): filenames = [os.path.join('sub', 'file1.txt'), @@ -839,7 +863,7 @@ def test_get_files_subdir_and_wildcard(self): with open(filename, mode='w'): pass - files = get_files('sub/*.txt') + files, _ = get_files('sub/*.txt') self.assertEqual([os.path.join('sub', 'file1.txt'), os.path.join('sub', 'file2.txt')], sorted(files)) @@ -864,7 +888,7 @@ def test_get_files_recursive_wildcard(self): with open(filename, mode='w'): pass - files = get_files(pattern) + files, _ = get_files(pattern) self.assertEqual(sorted(expected), sorted(files)) def test_get_files_symlinks(self): @@ -883,7 +907,7 @@ def test_get_files_symlinks(self): pass os.symlink(os.path.join(path, 'sub2'), os.path.join(path, 'sub1', 'sub2'), target_is_directory=True) - files = get_files(pattern) + files, _ = get_files(pattern) self.assertEqual(sorted(expected), sorted(files)) def test_get_files_character_range(self): @@ -894,7 +918,7 @@ def test_get_files_character_range(self): with open(filename, mode='w'): pass - files = get_files('file[0-2].*') + files, _ = get_files('file[0-2].*') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_multi_match(self): @@ -905,7 +929,7 @@ def test_get_files_multi_match(self): with open(filename, mode='w'): pass - files = get_files('*.txt\nfile*.txt\nfile2.*') + files, _ = get_files('*.txt\nfile*.txt\nfile2.*') self.assertEqual(['file1.txt', 'file2.txt'], sorted(files)) def test_get_files_absolute_path_and_wildcard(self): @@ -916,8 +940,9 @@ def test_get_files_absolute_path_and_wildcard(self): with open(filename, mode='w'): pass - files = get_files(os.path.join(path, '*')) + files, absolute = get_files(os.path.join(path, '*')) self.assertEqual([os.path.join(path, file) for file in filenames], sorted(files)) + self.assertTrue(absolute) def test_get_files_exclude_only(self): filenames = ['file1.txt', 'file2.txt', 'file3.bin'] @@ -927,7 +952,7 @@ def test_get_files_exclude_only(self): with open(filename, mode='w'): pass - files = get_files('!file*.txt') + files, _ = get_files('!file*.txt') self.assertEqual([], sorted(files)) def test_get_files_include_and_exclude(self): @@ -938,12 +963,12 @@ def test_get_files_include_and_exclude(self): with open(filename, mode='w'): pass - files = get_files('*.txt\n!file1.txt') + files, _ = get_files('*.txt\n!file1.txt') 
self.assertEqual(['file2.txt'], sorted(files)) def test_get_files_with_mock(self): with mock.patch('publish_test_results.glob') as m: - files = get_files('*.txt\n!file1.txt') + files, _ = get_files('*.txt\n!file1.txt') self.assertEqual([], files) self.assertEqual([mock.call('*.txt', recursive=True), mock.call('file1.txt', recursive=True)], m.call_args_list) @@ -1006,8 +1031,9 @@ def test_parse_files(self): self.assertEqual([], gha.method_calls) self.assertEqual(145, actual.files) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): + # on macOS and below Python 3.9 we see one particular error self.assertEqual(17, len(actual.errors)) self.assertEqual(731, actual.suites) self.assertEqual(4109, actual.suite_tests) @@ -1058,7 +1084,8 @@ def test_parse_files(self): '::error file=malformed-json.json::Error processing result file: Unsupported file format: malformed-json.json', '::error file=non-json.json::Error processing result file: Unsupported file format: non-json.json', ] - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): expected.extend([ '::error::lxml.etree.XMLSyntaxError: Failure to process entity xxe, line 17, column 51', '::error file=NUnit-sec1752-file.xml::Error processing result file: Failure to process entity xxe, line 17, column 51 (NUnit-sec1752-file.xml, line 17)', @@ -1088,8 +1115,9 @@ def test_parse_files_with_suite_details(self): **options) actual = parse_files(settings, gha) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): + # on macOS (below macOS 13) and Python below 3.9 we see one particular error self.assertEqual(363, len(actual.suite_details)) else: self.assertEqual(365, len(actual.suite_details)) @@ -1109,9 +1137,17 @@ def test_parse_files_no_matches(self): gha.warning.assert_has_calls([ mock.call(f'Could not find any JUnit XML files for {missing_junit}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any NUnit XML files for {missing_nunit}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any XUnit XML files for {missing_xunit}'), - mock.call(f'Could not find any TRX files for {missing_trx}') + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + 
mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'Could not find any TRX files for {missing_trx}'), + mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), + mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), ]) gha.error.assert_not_called() @@ -1171,8 +1207,9 @@ def test_main(self): # Publisher.publish is expected to have been called with these arguments results, cases, conclusion = m.call_args_list[0].args self.assertEqual(145, results.files) - if Version(sys.version.split(' ')[0]) >= Version('3.10.0') and sys.platform.startswith('darwin'): - # on macOS and Python 3.10 and above we see one particular error + if Version(sys.version.split(' ')[0]) < Version('3.9.0') and sys.platform.startswith('darwin') and \ + (platform.mac_ver()[0].startswith("11.") or platform.mac_ver()[0].startswith("12.")): + # on macOS and below Python 3.9 we see one particular error self.assertEqual(731, results.suites) self.assertEqual(731, len(results.suite_details)) self.assertEqual(1811, len(cases)) @@ -1274,12 +1311,12 @@ def test_deprecate_val(self): def test_action_fail(self): for action_fail, action_fail_on_inconclusive, expecteds in [ - (False, False, [False] * 3), - (False, True, [True, False, False]), - (True, False, [False, False, True]), - (True, True, [True, False, True]), + (False, False, [False] * 4), + (False, True, [True, False, False, False]), + (True, False, [False, False, True, False]), + (True, True, [True, False, True, False]), ]: - for expected, conclusion in zip(expecteds, ['inconclusive', 'success', 'failure']): + for expected, conclusion in zip(expecteds, ['neutral', 'success', 'failure', 'unknown']): with self.subTest(action_fail=action_fail, action_fail_on_inconclusive=action_fail_on_inconclusive, conclusion=conclusion): actual = action_fail_required(conclusion, action_fail, action_fail_on_inconclusive) self.assertEqual(expected, actual) diff --git a/python/test/test_cicd_yml.py b/python/test/test_cicd_yml.py index 47bbd2c5..6e695db6 100644 --- a/python/test/test_cicd_yml.py +++ b/python/test/test_cicd_yml.py @@ -12,7 +12,7 @@ def test_cicd_workflow(self): with open(project_root / 'action.yml', encoding='utf-8') as r: action = yaml.safe_load(r) - with open(project_root / '.github/workflows/ci-cd.yml', encoding='utf-8') as r: + with open(project_root / '.github/workflows/publish.yml', encoding='utf-8') as r: cicd = yaml.safe_load(r) docker_image_steps = cicd.get('jobs', []).get('publish-docker-image', {}).get('steps', []) diff --git a/python/test/test_junit.py b/python/test/test_junit.py index d71600c7..3cf60214 100644 --- a/python/test/test_junit.py +++ b/python/test/test_junit.py @@ -19,8 +19,8 @@ sys.path.append(str(pathlib.Path(__file__).resolve().parent)) from publish import __version__, available_annotations, none_annotations -from publish.junit import is_junit, parse_junit_xml_files, process_junit_xml_elems, get_results, get_result, get_content, \ - get_message, Disabled, JUnitTreeOrParseError, ParseError +from publish.junit import is_junit, parse_junit_xml_files, adjust_prefix, process_junit_xml_elems, get_results, \ + get_result, get_content, get_message, Disabled, JUnitTreeOrParseError, ParseError from publish.unittestresults import ParsedUnitTestResults, UnitTestCase from publish_test_results import get_test_results, get_stats, get_conclusion from 
publish.publisher import Publisher @@ -97,6 +97,21 @@ def shorten_filename(cls, filename, prefix=None): else: return filename + def test_adjust_prefix(self): + self.assertEqual(adjust_prefix("file", "+"), "file") + self.assertEqual(adjust_prefix("file", "+."), ".file") + self.assertEqual(adjust_prefix("file", "+./"), "./file") + self.assertEqual(adjust_prefix("file", "+path/"), "path/file") + + self.assertEqual(adjust_prefix("file", "-"), "file") + self.assertEqual(adjust_prefix(".file", "-."), "file") + self.assertEqual(adjust_prefix("./file", "-./"), "file") + self.assertEqual(adjust_prefix("path/file", "-path/"), "file") + self.assertEqual(adjust_prefix("file", "-"), "file") + self.assertEqual(adjust_prefix("file", "-."), "file") + self.assertEqual(adjust_prefix("file", "-./"), "file") + self.assertEqual(adjust_prefix("file", "-path/"), "file") + def do_test_parse_and_process_files(self, filename: str): for locale in [None, 'en_US.UTF-8', 'de_DE.UTF-8']: with self.test.subTest(file=self.shorten_filename(filename), locale=locale): @@ -299,7 +314,7 @@ def test_process_parse_junit_xml_files_with_time_factor(self): for time_factor in [1.0, 10.0, 60.0, 0.1, 0.001]: with self.subTest(time_factor=time_factor): self.assertEqual( - process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), time_factor), + process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), time_factor=time_factor), ParsedUnitTestResults( files=1, errors=[], @@ -379,6 +394,32 @@ def test_process_parse_junit_xml_files_with_time_factor(self): ] )) + def test_process_parse_junit_xml_files_with_test_file_prefix(self): + result_file = str(test_files_path / 'pytest' / 'junit.fail.xml') + for prefix in ["+python/", "-test/", "-src"]: + with self.subTest(prefix=prefix): + test_file = adjust_prefix('test/test_spark.py', prefix) + self.assertEqual( + process_junit_xml_elems(parse_junit_xml_files([result_file], False, False), test_file_prefix=prefix), + ParsedUnitTestResults( + files=1, + errors=[], + suites=1, + suite_tests=5, + suite_skipped=1, + suite_failures=1, + suite_errors=0, + suite_time=2, + suite_details=[], + cases=[ + UnitTestCase(result_file=result_file, test_file=test_file, line=1412, class_name='test.test_spark.SparkTests', test_name='test_check_shape_compatibility', result='success', message=None, content=None, stdout=None, stderr=None, time=6.435), + UnitTestCase(result_file=result_file, test_file=test_file, line=1641, class_name='test.test_spark.SparkTests', test_name='test_get_available_devices', result='skipped', message='get_available_devices only supported in Spark 3.0 and above', content='/horovod/test/test_spark.py:1642: get_available_devices only\n supported in Spark 3.0 and above\n ', stdout=None, stderr=None, time=0.001), + UnitTestCase(result_file=result_file, test_file=test_file, line=1102, class_name='test.test_spark.SparkTests', test_name='test_get_col_info', result='success', message=None, content=None, stdout=None, stderr=None, time=6.417), + UnitTestCase(result_file=result_file, test_file=test_file, line=819, class_name='test.test_spark.SparkTests', test_name='test_rsh_events', result='failure', message='self = def test_rsh_events(self): > self.do_test_rsh_events(3) test_spark.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test_spark.py:836: in do_test_rsh_events self.do_test_rsh(command, 143, events=events) test_spark.py:852: in do_test_rsh self.assertEqual(expected_result, res) E AssertionError: 143 != 0', content='self = 
\n\n def test_rsh_events(self):\n > self.do_test_rsh_events(3)\n\n test_spark.py:821:\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n test_spark.py:836: in do_test_rsh_events\n self.do_test_rsh(command, 143, events=events)\n test_spark.py:852: in do_test_rsh\n self.assertEqual(expected_result, res)\n E AssertionError: 143 != 0\n ', stdout=None, stderr=None, time=7.541), + UnitTestCase(result_file=result_file, test_file=test_file, line=813, class_name='test.test_spark.SparkTests', test_name='test_rsh_with_non_zero_exit_code', result='success', message=None, content=None, stdout=None, stderr=None, time=1.514) + ] + )) + def test_get_results(self): success = TestElement('success') skipped = TestElement('skipped') diff --git a/python/test/test_publish.py b/python/test/test_publish.py index f49f0b2a..294d90f7 100644 --- a/python/test/test_publish.py +++ b/python/test/test_publish.py @@ -4,10 +4,10 @@ import mock -from publish import __version__, Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ - get_error_annotation, get_digest_from_stats, \ +from publish import Annotation, UnitTestSuite, UnitTestRunResults, UnitTestRunDeltaResults, CaseMessages, \ + get_json_path, get_error_annotation, get_digest_from_stats, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ - duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, \ + duration_label_md, SomeTestChanges, abbreviate, abbreviate_bytes, get_test_name, get_formatted_digits, digit_space, \ get_magnitude, get_delta, as_short_commit, as_delta, as_stat_number, as_stat_duration, get_stats_from_digest, \ digest_string, ungest_string, get_details_line_md, get_commit_line_md, restrict_unicode, \ get_short_summary, get_short_summary_md, get_long_summary_md, get_long_summary_with_runs_md, \ @@ -29,6 +29,22 @@ class PublishTest(unittest.TestCase): old_locale = None details = [UnitTestSuite('suite', 7, 3, 2, 1, 'std-out', 'std-err')] + def test_get_json_path(self): + detail = {'a': 'A', 'b': 'B', 'c': ['d'], 'e': {}, 'f': None} + json = {'id': 1, 'name': 'Name', 'detail': detail} + + self.assertEqual(None, get_json_path(json, 'not there')) + self.assertEqual(1, get_json_path(json, 'id')) + self.assertEqual('Name', get_json_path(json, 'name')) + self.assertEqual(detail, get_json_path(json, 'detail')) + self.assertEqual('A', get_json_path(json, 'detail.a')) + self.assertEqual(None, get_json_path(json, 'detail.a.g')) + self.assertEqual(['d'], get_json_path(json, 'detail.c')) + self.assertEqual({}, get_json_path(json, 'detail.e')) + self.assertEqual(None, get_json_path(json, 'detail.e.g')) + self.assertEqual(None, get_json_path(json, 'detail.f')) + self.assertEqual(None, get_json_path(json, 'detail.f.g')) + def test_test_changes(self): changes = SomeTestChanges(['removed-test', 'removed-skip', 'remain-test', 'remain-skip', 'skip', 'unskip'], ['remain-test', 'remain-skip', 'skip', 'unskip', 'add-test', 'add-skip'], @@ -404,42 +420,42 @@ def test_as_delta(self): self.assertEqual(as_delta(+1, 1), '+1') self.assertEqual(as_delta(-2, 1), ' - 2') - self.assertEqual(as_delta(0, 2), '±  0') - self.assertEqual(as_delta(+1, 2), '+  1') - self.assertEqual(as_delta(-2, 2), ' -   2') + self.assertEqual(as_delta(0, 2), f'±{digit_space}0') + self.assertEqual(as_delta(+1, 2), f'+{digit_space}1') + self.assertEqual(as_delta(-2, 2), f' - {digit_space}2') - self.assertEqual(as_delta(1, 5), '+       
1') - self.assertEqual(as_delta(12, 5), '+     12') - self.assertEqual(as_delta(123, 5), '+   123') + self.assertEqual(as_delta(1, 5), f'+{digit_space} {digit_space}{digit_space}1') + self.assertEqual(as_delta(12, 5), f'+{digit_space} {digit_space}12') + self.assertEqual(as_delta(123, 5), f'+{digit_space} 123') self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 6), f'+{digit_space}{digit_space} 123') with temp_locale('en_US'): self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 6), f'+{digit_space}{digit_space} 123') with temp_locale('de_DE'): self.assertEqual(as_delta(1234, 5), '+1 234') - self.assertEqual(as_delta(1234, 6), '+  1 234') - self.assertEqual(as_delta(123, 6), '+     123') + self.assertEqual(as_delta(1234, 6), f'+{digit_space}1 234') + self.assertEqual(as_delta(123, 6), f'+{digit_space}{digit_space} 123') def test_as_stat_number(self): label = 'unit' self.assertEqual(as_stat_number(None, 1, 0, label), 'N/A unit') self.assertEqual(as_stat_number(1, 1, 0, label), '1 unit') - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') with temp_locale('en_US'): - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') with temp_locale('de_DE'): - self.assertEqual(as_stat_number(123, 6, 0, label), '     123 unit') - self.assertEqual(as_stat_number(1234, 6, 0, label), '  1 234 unit') + self.assertEqual(as_stat_number(123, 6, 0, label), f'{digit_space}{digit_space} 123 unit') + self.assertEqual(as_stat_number(1234, 6, 0, label), f'{digit_space}1 234 unit') self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit') self.assertEqual(as_stat_number(dict(number=1), 1, 0, label), '1 unit') @@ -447,16 +463,16 @@ def test_as_stat_number(self): self.assertEqual(as_stat_number(dict(number=1, delta=-1), 1, 1, label), '1 unit  - 1 ') self.assertEqual(as_stat_number(dict(number=2, delta=+0), 1, 1, label), '2 unit ±0 ') self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 1, label), '3 unit +1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 2, label), '3 unit +  1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1), 2, 2, label), '  3 unit +  1 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 2, label), f'3 unit +{digit_space}1 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1), 2, 2, label), f'{digit_space}3 unit +{digit_space}1 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit 
+{digit_space}1 234 ') self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') with temp_locale('en_US'): - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit +{digit_space}1 234 ') self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') with temp_locale('de_DE'): - self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit +  1 234 ') - self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), f'3 unit +{digit_space}1 234 ') + self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), f'3 unit +12 345 ') self.assertEqual(as_stat_number(dict(delta=-1), 3, 1, label), 'N/A unit  - 1 ') @@ -539,11 +555,11 @@ def test_get_short_summary(self): def test_label_md(self): self.assertEqual(all_tests_label_md, 'tests') - self.assertEqual(passed_tests_label_md, f'[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")') - self.assertEqual(skipped_tests_label_md, f'[:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")') - self.assertEqual(failed_tests_label_md, f'[:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")') - self.assertEqual(test_errors_label_md, f'[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")') - self.assertEqual(duration_label_md, f'[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")') + self.assertEqual(passed_tests_label_md, f':white_check_mark:') + self.assertEqual(skipped_tests_label_md, f':zzz:') + self.assertEqual(failed_tests_label_md, f':x:') + self.assertEqual(test_errors_label_md, f':fire:') + self.assertEqual(duration_label_md, f':stopwatch:') def test_get_short_summary_md(self): self.assertEqual(get_short_summary_md(UnitTestRunResults( @@ -657,9 +673,9 @@ def test_get_long_summary_with_runs_md(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=0, runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=0, commit='commit' - )), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + )), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' f'\n' f'Results for commit commit.\n')) @@ -669,9 +685,9 @@ def test_get_long_summary_with_runs_md_with_errors(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8, runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13, commit='commit' - )), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 
{failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' + )), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' f'\n' f'Results for commit commit.\n')) @@ -681,9 +697,9 @@ def test_get_long_summary_with_runs_md_with_deltas(self): tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9), runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14), commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef' - )), (f'1 files  +  2    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + )), (f'1 files  +{digit_space}2  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' f'\n' f'Results for commit 12345678. 
± Comparison against type commit 01234567.\n')) @@ -696,9 +712,9 @@ def test_get_long_summary_with_runs_md_with_details_url_with_fails(self): commit='commit' ), 'https://details.url/' - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' f'\n' f'For more details on these failures, see [this check](https://details.url/).\n' f'\n' @@ -714,9 +730,9 @@ def test_get_long_summary_with_runs_md_with_details_url_without_fails(self): commit='commit' ), 'https://details.url/' - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' f'\n' f'Results for commit commit.\n') ) @@ -734,9 +750,9 @@ def test_get_long_summary_with_runs_md_with_test_lists(self): ['test1', 'test2', 'test3', 'test4', 'test5'], ['test5', 'test6'], ['test2'], ['test5', 'test6'] ), - ), (f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + ), (f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -800,7 +816,7 @@ def test_get_long_summary_without_runs_md_with_errors(self): tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8, runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8, commit='commit' - )), (f'4 {all_tests_label_md}   5 {passed_tests_label_md}  3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + )), (f'4 {all_tests_label_md}   5 {passed_tests_label_md}  3s :stopwatch:\n' f'2 suites  6 {skipped_tests_label_md}\n' f'1 files    7 {failed_tests_label_md}  8 {test_errors_label_md}\n' f'\n' @@ -997,9 +1013,9 @@ def test_get_long_summary_with_digest_md_with_multiple_runs(self): ) ) - self.assertEqual(actual, f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} 
{digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1021,9 +1037,9 @@ def test_get_long_summary_with_digest_md_with_test_errors(self): ) ) - self.assertEqual(actual, f'1 files    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1045,9 +1061,9 @@ def test_get_long_summary_with_digest_md_with_parse_errors(self): ) ) - self.assertEqual(actual, f'1 files    1 errors    2 suites   3s {duration_label_md}\n' - f'4 {all_tests_label_md}   5 {passed_tests_label_md}   6 {skipped_tests_label_md}   7 {failed_tests_label_md}   8 {test_errors_label_md}\n' - f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' + self.assertEqual(actual, f'1 files  {digit_space}1 errors  {digit_space}2 suites   3s {duration_label_md}\n' + f'4 {all_tests_label_md} {digit_space}5 {passed_tests_label_md} {digit_space}6 {skipped_tests_label_md} {digit_space}7 {failed_tests_label_md} {digit_space}8 {test_errors_label_md}\n' + f'9 runs  10 {passed_tests_label_md} 11 {skipped_tests_label_md} 12 {failed_tests_label_md} 13 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1074,9 +1090,9 @@ def test_get_long_summary_with_digest_md_with_delta(self): ) ) - self.assertEqual(actual, f'1 files  +  2    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + self.assertEqual(actual, f'1 files  +{digit_space}2  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' '\n' 'Results for commit 12345678. 
± Comparison against type commit 01234567.\n' '\n' @@ -1103,9 +1119,9 @@ def test_get_long_summary_with_digest_md_with_delta_and_parse_errors(self): ) ) - self.assertEqual(actual, f'1 files  +  2    1 errors    2 suites   - 3   3s {duration_label_md} +4s\n' - f'4 {all_tests_label_md}  -   5    5 {passed_tests_label_md} +  6    6 {skipped_tests_label_md}  -   7    7 {failed_tests_label_md} +  8    8 {test_errors_label_md}  -   9 \n' - f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' + self.assertEqual(actual, f'1 files  +{digit_space}2  {digit_space}1 errors  {digit_space}2 suites   - 3   3s {duration_label_md} +4s\n' + f'4 {all_tests_label_md}  - {digit_space}5  {digit_space}5 {passed_tests_label_md} +{digit_space}6  {digit_space}6 {skipped_tests_label_md}  - {digit_space}7  {digit_space}7 {failed_tests_label_md} +{digit_space}8  {digit_space}8 {test_errors_label_md}  - {digit_space}9 \n' + f'9 runs  +10  10 {passed_tests_label_md}  - 11  11 {skipped_tests_label_md} +12  12 {failed_tests_label_md}  - 13  13 {test_errors_label_md} +14 \n' '\n' 'Results for commit 12345678. ± Comparison against type commit 01234567.\n' '\n' @@ -2063,9 +2079,9 @@ def test_files(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'  10 files    10 suites   39m 1s {duration_label_md}\n' - f'217 {all_tests_label_md} 208 {passed_tests_label_md}   9 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' - f'373 runs  333 {passed_tests_label_md} 40 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + self.assertEqual(md, (f'{digit_space}10 files  {digit_space}10 suites   39m 1s {duration_label_md}\n' + f'217 {all_tests_label_md} 208 {passed_tests_label_md} {digit_space}9 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' + f'373 runs  333 {passed_tests_label_md} 40 {skipped_tests_label_md} 0 {failed_tests_label_md}\n' f'\n' f'Results for commit example.\n')) @@ -2074,7 +2090,7 @@ def test_file_without_cases(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s :stopwatch:\n' f'1 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'\n' @@ -2085,7 +2101,7 @@ def test_file_without_cases_but_with_tests(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'6 {all_tests_label_md}   3 {passed_tests_label_md}  0s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'6 {all_tests_label_md}   3 {passed_tests_label_md}  0s :stopwatch:\n' f'1 suites  2 {skipped_tests_label_md}\n' f'1 files    1 {failed_tests_label_md}\n' f'\n' @@ -2096,7 +2112,7 @@ def test_non_parsable_file(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s 
[:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'0 {all_tests_label_md}   0 {passed_tests_label_md}  0s :stopwatch:\n' f'0 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'1 errors\n' @@ -2108,7 +2124,7 @@ def test_files_with_testsuite_in_testsuite(self): results = get_test_results(parsed, False) stats = get_stats(results) md = get_long_summary_md(stats) - self.assertEqual(md, (f'5 {all_tests_label_md}   5 {passed_tests_label_md}  4s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' + self.assertEqual(md, (f'5 {all_tests_label_md}   5 {passed_tests_label_md}  4s :stopwatch:\n' f'4 suites  0 {skipped_tests_label_md}\n' f'1 files    0 {failed_tests_label_md}\n' f'\n' @@ -2134,8 +2150,8 @@ def test_files_without_annotations(self): stats = get_stats(results) md = get_long_summary_md(stats) self.assertEqual(md, (f'373 {all_tests_label_md}   333 {passed_tests_label_md}  39m 1s {duration_label_md}\n' - f'  10 suites    40 {skipped_tests_label_md}\n' - f'  10 files        0 {failed_tests_label_md}\n' + f'{digit_space}10 suites  {digit_space}40 {skipped_tests_label_md}\n' + f'{digit_space}10 files    {digit_space}{digit_space}0 {failed_tests_label_md}\n' f'\n' f'Results for commit example.\n')) diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index 711aa040..c403a1bd 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -13,13 +13,13 @@ import mock from github import Github, GithubException -from publish import __version__, comment_mode_off, comment_mode_always, \ +from publish import __version__, get_json_path, comment_mode_off, comment_mode_always, \ comment_mode_changes, comment_mode_changes_failures, comment_mode_changes_errors, \ comment_mode_failures, comment_mode_errors, Annotation, default_annotations, \ get_error_annotation, digest_header, get_digest_from_stats, \ all_tests_list, skipped_tests_list, none_annotations, \ all_tests_label_md, skipped_tests_label_md, failed_tests_label_md, passed_tests_label_md, test_errors_label_md, \ - duration_label_md, pull_request_build_mode_merge, punctuation_space, \ + duration_label_md, digit_space, pull_request_build_mode_merge, punctuation_space, \ get_long_summary_with_digest_md from publish.github_action import GithubAction from publish.publisher import Publisher, Settings, PublishData @@ -79,6 +79,7 @@ def create_github_pr(repo: str, @staticmethod def create_settings(actor='actor', comment_mode=comment_mode_always, + check_run=True, job_summary=True, compare_earlier=True, report_individual_runs=False, @@ -120,9 +121,11 @@ def create_settings(actor='actor', xunit_files_glob=None, trx_files_glob=None, time_factor=1.0, + test_file_prefix=None, check_name='Check Name', comment_title='Comment Title', comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, compare_earlier=compare_earlier, pull_request_build=pull_request_build, @@ -137,7 +140,7 @@ def create_settings(actor='actor', seconds_between_github_reads=1.5, seconds_between_github_writes=2.5, secondary_rate_limit_wait_seconds=6.0, - search_pull_requests=search_pull_requests + search_pull_requests=search_pull_requests, ) stats = UnitTestRunResults( @@ -490,6 +493,22 @@ def test_publish_without_job_summary_and_comment(self): self.assertEqual((self.stats, self.cases, 'success'), args) 
self.assertEqual({}, kwargs) + def test_publish_without_job_summary_and_comment_on_fork(self): + settings = self.create_settings(is_fork=True, comment_mode=comment_mode_off, job_summary=False) + mock_calls = self.call_mocked_publish(settings, prs=[object()]) + + self.assertEqual(1, len(mock_calls)) + (method, args, kwargs) = mock_calls[0] + self.assertEqual('get_check_run', method) + self.assertEqual(('before', ), args) + self.assertEqual({}, kwargs) + + def test_publish_without_check_run_job_summary_and_comment(self): + settings = self.create_settings(comment_mode=comment_mode_off, job_summary=False, check_run=False) + mock_calls = self.call_mocked_publish(settings, prs=[object()]) + + self.assertEqual(0, len(mock_calls)) + def test_publish_with_comment_without_pr(self): settings = self.create_settings() mock_calls = self.call_mocked_publish(settings, prs=[]) @@ -669,18 +688,18 @@ def test_publish_comment_compare_earlier_with_restricted_unicode(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('create_issue_comment', method) self.assertEqual(('## title\n' - '\u205f\u20041 files\u2004 ±\u205f\u20040\u2002\u2003' + f'{digit_space}1 files\u2004 ±{digit_space}0\u2002\u2003' '2 suites\u2004 +1\u2002\u2003\u2002' - f'3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests") +2s\n' + f'3s :stopwatch: +2s\n' '22 tests +19\u2002\u2003' - f'4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") +3\u2002\u2003' - f'5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +3\u2002\u2003\u205f\u2004' - f'6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +\u205f\u20046\u2002\u2003\u205f\u2004' - f'7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +\u205f\u20047\u2002\n' - f'38 runs\u2006 +35\u2002\u20038 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") +7\u2002\u2003' - f'9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +7\u2002\u2003' - f'10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +10\u2002\u2003' - f'11 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +11\u2002\n' + f'4 :white_check_mark: +3\u2002\u2003' + f'5 :zzz: +3\u2002\u2003{digit_space}' + f'6 :x: +{digit_space}6\u2002\u2003{digit_space}' + f'7 :fire: +{digit_space}7\u2002\n' + f'38 runs\u200a +35\u2002\u20038 :white_check_mark: +7\u2002\u2003' + f'9 :zzz: +7\u2002\u2003' + f'10 :x: +10\u2002\u2003' + f'11 :fire: +11\u2002\n' '\n' 'For more details on these failures and errors, see [this check](html://url).\n' '\n' @@ -887,6 +906,22 @@ def test_get_pull_from_event(self): actual = publisher.get_pull_from_event() self.assertIs(actual, pr) repo.get_pull.assert_called_once_with(1234) + repo.get_pull.reset_mock() + + # test with none in pull request + for event in [ + {}, + {'pull_request': None}, + {'pull_request': {'number': 1234, 'base': None}}, + {'pull_request': {'number': 1234, 'base': {'repo': 
None}}}, + {'pull_request': {'number': 1234, 'base': {'repo': {}}}}, + ]: + settings = self.create_settings(event=event) + publisher = Publisher(settings, gh, gha) + + actual = publisher.get_pull_from_event() + self.assertIsNone(actual) + repo.get_pull.assert_not_called() def do_test_get_pulls(self, settings: Settings, @@ -910,7 +945,7 @@ def do_test_get_pulls(self, else: gh.search_issues.assert_not_called() if event_pull_request is not None and \ - settings.repo == settings.event.get('pull_request', {}).get('base', {}).get('repo', {}).get('full_name'): + settings.repo == get_json_path(settings.event, 'pull_request.base.repo.full_name'): repo.get_pull.assert_called_once_with(event_pull_request.number) commit.get_pulls.assert_not_called() else: @@ -1287,9 +1322,9 @@ def do_test_publish_check_without_base_stats(self, errors: List[ParseError], ann output={ 'title': '{}7 errors, 6 fail, 5 skipped, 4 pass in 3s' .format('{} parse errors, '.format(len(errors)) if len(errors) > 0 else ''), - 'summary': f'\u205f\u20041 files\u2004\u2003{{errors}}2 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + 'summary': f'{digit_space}1 files\u2004\u2003{{errors}}2 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1317,7 +1352,7 @@ def do_test_publish_check_without_base_stats(self, errors: List[ParseError], ann 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  {summary_errors}2 suites   3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n22 tests 4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")   6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")   7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n38 runs  8 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 11 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n\\nResults for commit commit.\\n", ' + f'"summary": "{digit_space}1 files  {summary_errors}2 suites   3s :stopwatch:\\n22 tests 4 
:white_check_mark: 5 :zzz: {digit_space}6 :x: {digit_space}7 :fire:\\n38 runs  8 :white_check_mark: 9 :zzz: 10 :x: 11 :fire:\\n\\nResults for commit commit.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' f'"annotations": {len(annotations)}, ' @@ -1354,9 +1389,9 @@ def do_test_publish_check_with_base_stats(self, errors: List[ParseError]): output={ 'title': '{}7 errors, 6 fail, 5 skipped, 4 pass in 3s' .format('{} parse errors, '.format(len(errors)) if len(errors) > 0 else ''), - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u2003{{errors}}2 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u2003{{errors}}2 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1389,7 +1424,7 @@ def do_test_publish_check_with_base_stats(self, errors: List[ParseError]): 'json', '{' f'"title": "{title_errors}7 errors, 6 fail, 5 skipped, 4 pass in 3s", ' - f'"summary": "  1 files  ±0  {summary_errors}2 suites  ±0   3s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\") ±0s\\n22 tests +1  4 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")  -   8  5 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") +1    6 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") +4    7 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\") +  4 \\n38 runs  +1  8 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")  - 17  9 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") +2  10 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") +6  11 
[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\") +10 \\n\\nResults for commit commit. ± Comparison against earlier commit past.\\n", ' + f'"summary": "{digit_space}1 files  ±0  {summary_errors}2 suites  ±0   3s :stopwatch: ±0s\\n22 tests +1  4 :white_check_mark:  - {digit_space}8  5 :zzz: +1  {digit_space}6 :x: +4  {digit_space}7 :fire: +{digit_space}4 \\n38 runs  +1  8 :white_check_mark:  - 17  9 :zzz: +2  10 :x: +6  11 :fire: +10 \\n\\nResults for commit commit. ± Comparison against earlier commit past.\\n", ' '"conclusion": "conclusion", ' '"stats": {"files": 1, ' + f'"errors": {len(errors)}, ' + '"suites": 2, "duration": 3, "tests": 22, "tests_succ": 4, "tests_skip": 5, "tests_fail": 6, "tests_error": 7, "runs": 38, "runs_succ": 8, "runs_skip": 9, "runs_fail": 10, "runs_error": 11, "commit": "commit"}, ' '"stats_with_delta": {"files": {"number": 1, "delta": 0}, ' + f'"errors": {len(errors)}, ' + '"suites": {"number": 2, "delta": 0}, "duration": {"duration": 3, "delta": 0}, "tests": {"number": 22, "delta": 1}, "tests_succ": {"number": 4, "delta": -8}, "tests_skip": {"number": 5, "delta": 1}, "tests_fail": {"number": 6, "delta": 4}, "tests_error": {"number": 7, "delta": 4}, "runs": {"number": 38, "delta": 1}, "runs_succ": {"number": 8, "delta": -17}, "runs_skip": {"number": 9, "delta": 2}, "runs_fail": {"number": 10, "delta": 6}, "runs_error": {"number": 11, "delta": 10}, "commit": "commit", "reference_type": "earlier", "reference_commit": "past"}, ' @@ -1420,9 +1455,9 @@ def test_publish_check_without_compare(self): conclusion='conclusion', output={ 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + 'summary': f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -1482,9 +1517,9 @@ def test_publish_check_with_multiple_annotation_pages(self): conclusion='conclusion', output={ 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} 
\u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1515,9 +1550,9 @@ def test_publish_check_with_multiple_annotation_pages(self): outputs = [ { 'title': '7 errors, 6 fail, 5 skipped, 4 pass in 3s', - 'summary': f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + 'summary': f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against earlier commit past.\n' '\n' @@ -1643,9 +1678,9 @@ def test_publish_check_with_suite_details(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + 'summary': f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 
suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1679,9 +1714,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests   1 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 2 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 3 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 1 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs  -12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 4 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 5 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 6 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files  {digit_space}1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests {digit_space}1 :white_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :white_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1715,9 +1750,9 @@ def test_publish_check_with_suite_details(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20036 
[:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1766,9 +1801,9 @@ def test_publish_check_with_cases(self): conclusion='conclusion', output={ 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', - 'summary': f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + 'summary': f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\n' '\n' 'Results for commit commit.\n' '\n' @@ -1803,9 +1838,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files    1 errors  2 suites   7s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests   1 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 2 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\") 3 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 1 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs  -12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\") 4 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols 
\\"skipped / disabled tests\\") 5 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\") 6 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files  {digit_space}1 errors  2 suites   7s :stopwatch:\\n' + f'7 tests {digit_space}1 :white_check_mark: 2 :zzz: 3 :x: 1 :fire:\\n' + f'3 runs  -12 :white_check_mark: 4 :zzz: 5 :x: 6 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -1889,9 +1924,9 @@ def test_publish_check_with_cases(self): '{' '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' '"summary": "' - f'1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"duration of all tests\\")\\n' - f'7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' - f'3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"passed tests\\")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"skipped / disabled tests\\")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"failed tests\\")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols \\"test errors\\")\\n' + f'1 files\u2004\u2003{digit_space}1 errors\u2004\u20032 suites\u2004\u2003\u20027s :stopwatch:\\n' + f'7 tests\u2003{digit_space}1 :white_check_mark:\u20032 :zzz:\u20033 :x:\u20031 :fire:\\n' + f'3 runs\u200a\u2003-12 :white_check_mark:\u20034 :zzz:\u20035 :x:\u20036 :fire:\\n' '\\n' 'Results for commit commit.\\n", ' '"conclusion": "conclusion", ' @@ -2233,9 +2268,9 @@ def test_publish_job_summary_without_before(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests")\n' - f'22 tests\u20034 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20035 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u2003\u205f\u20046 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u2003\u205f\u20047 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' - f'38 runs\u2006\u20038 
[:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests")\u20039 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests")\u200310 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests")\u200311 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors")\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s :stopwatch:\n' + f'22 tests\u20034 :white_check_mark:\u20035 :zzz:\u2003{digit_space}6 :x:\u2003{digit_space}7 :fire:\n' + f'38 runs\u200a\u20038 :white_check_mark:\u20039 :zzz:\u200310 :x:\u200311 :fire:\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2264,9 +2299,9 @@ def test_publish_job_summary_with_before(self): (method, args, kwargs) = mock_calls[0] self.assertEqual('add_to_job_summary', method) self.assertEqual(('## title\n' - f'\u205f\u20041 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "duration of all tests") -1s\n' - f'22 tests +2\u2002\u20034 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") \u2006-\u200a1\u2002\u20035 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") +1\u2002\u2003\u205f\u20046 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +1\u2002\u2003\u205f\u20047 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +1\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "passed tests") \u2006-\u200a2\u2002\u20039 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "skipped / disabled tests") ±0\u2002\u200310 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "failed tests") +2\u2002\u200311 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols "test errors") +4\u2002\n' + f'{digit_space}1 files\u2004 \u2006-\u200a1\u2002\u20032 suites\u2004 \u2006-\u200a1\u2002\u2003\u20023s :stopwatch: -1s\n' + f'22 tests +2\u2002\u20034 :white_check_mark: \u2006-\u200a1\u2002\u20035 :zzz: +1\u2002\u2003{digit_space}6 :x: +1\u2002\u2003{digit_space}7 :fire: +1\u2002\n' + f'38 runs\u200a +1\u2002\u20038 :white_check_mark: \u2006-\u200a2\u2002\u20039 :zzz: ±0\u2002\u200310 :x: +2\u2002\u200311 :fire: +4\u2002\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2289,9 +2324,9 @@ def test_publish_comment(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} 
+1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against base commit base.\n' '\n' @@ -2332,9 +2367,9 @@ def test_publish_comment_without_base(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -2357,9 +2392,9 @@ def test_publish_comment_without_compare(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' - f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003\u205f\u20046 {failed_tests_label_md}\u2003\u205f\u20047 {test_errors_label_md}\n' - f'38 runs\u2006\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' + f'{digit_space}1 files\u2004\u20032 suites\u2004\u2003\u20023s {duration_label_md}\n' + f'22 {all_tests_label_md}\u20034 {passed_tests_label_md}\u20035 {skipped_tests_label_md}\u2003{digit_space}6 {failed_tests_label_md}\u2003{digit_space}7 {test_errors_label_md}\n' + f'38 runs\u200a\u20038 {passed_tests_label_md}\u20039 {skipped_tests_label_md}\u200310 {failed_tests_label_md}\u200311 {test_errors_label_md}\n' '\n' 'Results for commit commit.\n' '\n' @@ -2383,9 +2418,9 @@ def test_publish_comment_with_check_run_with_annotations(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003\u205f\u20046 {failed_tests_label_md} +4\u2002\u2003\u205f\u20047 {test_errors_label_md} +\u205f\u20044\u2002\n' - f'38 
runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u2003{digit_space}6 {failed_tests_label_md} +4\u2002\u2003{digit_space}7 {test_errors_label_md} +{digit_space}4\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u200310 {failed_tests_label_md} +6\u2002\u200311 {test_errors_label_md} +10\u2002\n' '\n' 'For more details on these failures and errors, see [this check](http://check-run.url).\n' '\n' @@ -2415,9 +2450,9 @@ def test_publish_comment_with_check_run_without_annotations(self): pr.create_issue_comment.assert_called_once_with( '## Comment Title\n' - f'\u205f\u20041 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' - f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a\u205f\u20048\u2002\u20035 {skipped_tests_label_md} +1\u2002\u20030 {failed_tests_label_md} \u2006-\u200a2\u2002\n' - f'38 runs\u2006 +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u20030 {failed_tests_label_md} \u2006-\u200a4\u2002\n' + f'{digit_space}1 files\u2004 ±0\u2002\u20032 suites\u2004 ±0\u2002\u2003\u20023s {duration_label_md} ±0s\n' + f'22 {all_tests_label_md} +1\u2002\u20034 {passed_tests_label_md} \u2006-\u200a{digit_space}8\u2002\u20035 {skipped_tests_label_md} +1\u2002\u20030 {failed_tests_label_md} \u2006-\u200a2\u2002\n' + f'38 runs\u200a +1\u2002\u20038 {passed_tests_label_md} \u2006-\u200a17\u2002\u20039 {skipped_tests_label_md} +2\u2002\u20030 {failed_tests_label_md} \u2006-\u200a4\u2002\n' '\n' 'Results for commit commit.\u2003± Comparison against base commit base.\n' '\n' @@ -2620,6 +2655,15 @@ def test_get_pull_request_comments_order_updated(self): 'Results for commit dee59820.\u2003± Comparison against base commit 70b5dd18.\n', 'isMinimized': False }, + # malformed comments + { + 'id': 'comment nine', + 'author': None, + }, + { + 'id': 'comment ten', + 'author': {}, + }, ] def test_get_action_comments(self): From cbe23ce54f5e7e3d6e7e1565b70375a0278a252e Mon Sep 17 00:00:00 2001 From: Adam Hernandez Date: Fri, 22 Dec 2023 09:45:16 -0700 Subject: [PATCH 28/28] Change instances of EnricoMi to im-open --- .github/FUNDING.yml | 2 +- .github/actions/test/action.yml | 2 +- .github/workflows/badges.yml | 8 ++++---- .github/workflows/ci-cd.yml | 6 +++--- .github/workflows/publish.yml | 6 +++--- .github/workflows/test-results.yml | 4 ++-- Dockerfile | 4 ++-- README.md | 10 +++++----- action.yml | 4 ++-- composite/action.yml | 18 +++++++++--------- misc/action/fetch-workflows/action.yml | 4 ++-- misc/action/find-workflows/action.yml | 2 +- misc/action/json-output/action.yml | 2 +- misc/action/package-downloads/action.yml | 2 +- python/publish/__init__.py | 2 +- python/publish/publisher.py | 2 +- python/publish_test_results.py | 10 +++++----- python/test/test_action_script.py | 10 +++++----- python/test/test_publisher.py | 2 +- 19 files changed, 50 insertions(+), 50 deletions(-) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 26a32499..f44526a2 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ 
-1 +1 @@ -github: EnricoMi +github: im-open diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml index 606c9a10..319d77ee 100644 --- a/.github/actions/test/action.yml +++ b/.github/actions/test/action.yml @@ -1,5 +1,5 @@ name: 'Test' -author: 'EnricoMi' +author: 'im-open' description: 'A GitHub Action that tests this action' inputs: diff --git a/.github/workflows/badges.yml b/.github/workflows/badges.yml index 0bcfa397..b1357217 100644 --- a/.github/workflows/badges.yml +++ b/.github/workflows/badges.yml @@ -40,14 +40,14 @@ jobs: uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + gistURL: https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395 file: downloads.svg - name: Upload JSON to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + gistURL: https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395 file: downloads.json workflows: @@ -80,12 +80,12 @@ jobs: uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + gistURL: https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395 file: workflows.svg - name: Upload JSON to Gist uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + gistURL: https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395 file: workflows.json diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index ec584696..9e27d5e6 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -66,7 +66,7 @@ jobs: name: Configure Deployment needs: [test-mac, test-lnx, test-win] # do not build or deploy on forked repositories - if: github.repository_owner == 'EnricoMi' + if: github.repository_owner == 'im-open' runs-on: ubuntu-latest outputs: image: ${{ steps.action.outputs.image }} @@ -104,14 +104,14 @@ jobs: needs: [publish, config-deploy] # do not build or deploy on forked repositories - if: github.repository_owner == 'EnricoMi' + if: github.repository_owner == 'im-open' runs-on: ubuntu-latest steps: - name: Docker meta id: docker-meta uses: docker/metadata-action@v5 with: - images: ghcr.io/EnricoMi/publish-unit-test-result-action + images: ghcr.io/im-open/publish-unit-test-result-action flavor: | latest=false prefix=v diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index feaae1ea..68479f35 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -75,7 +75,7 @@ jobs: load: true push: false platforms: linux/${{ matrix.arch }} - tags: enricomi/publish-unit-test-result-action:latest + tags: im-open/publish-unit-test-result-action:latest outputs: type=docker - name: Download Artifacts @@ -188,7 +188,7 @@ jobs: -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ - 
enricomi/publish-unit-test-result-action:latest + im-open/publish-unit-test-result-action:latest shell: bash - name: JSON output @@ -201,7 +201,7 @@ jobs: id: scan uses: crazy-max/ghaction-container-scan@v3 with: - image: enricomi/publish-unit-test-result-action:latest + image: im-open/publish-unit-test-result-action:latest dockerfile: ./Dockerfile annotations: true - name: Upload SARIF artifact diff --git a/.github/workflows/test-results.yml b/.github/workflows/test-results.yml index 6dd2d3f2..3d839b59 100644 --- a/.github/workflows/test-results.yml +++ b/.github/workflows/test-results.yml @@ -25,7 +25,7 @@ jobs: - name: Publish Test Results id: test-results - uses: EnricoMi/publish-unit-test-result-action/composite@master + uses: im-open/publish-unit-test-result-action/composite@master with: commit: ${{ github.event.workflow_run.head_sha }} check_name: Test Results (reference) @@ -67,5 +67,5 @@ jobs: uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395 + gistURL: https://gist.githubusercontent.com/im-open/612cb538c14731f1a8fefe504f519395 file: tests.svg diff --git a/Dockerfile b/Dockerfile index 455b6ede..9069014d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ FROM python:3.8-alpine -LABEL repository="https://github.com/EnricoMi/publish-unit-test-result-action" -LABEL homepage="https://github.com/EnricoMi/publish-unit-test-result-action" +LABEL repository="https://github.com/im-open/publish-unit-test-result-action" +LABEL homepage="https://github.com/im-open/publish-unit-test-result-action" LABEL maintainer="Enrico Minack " LABEL com.github.actions.name="Publish Test Results" diff --git a/README.md b/README.md index deac75c7..4bd94374 100644 --- a/README.md +++ b/README.md @@ -355,7 +355,7 @@ Here is an example JSON: ```json { "title": "4 parse errors, 4 errors, 23 fail, 18 skipped, 227 pass in 39m 12s", - "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. 
± Comparison against earlier commit d8ce4b6c.\n", + "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/im-open/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. ± Comparison against earlier commit d8ce4b6c.\n", "conclusion": "success", "stats": { "files": 24, @@ -784,9 +784,9 @@ You can then use the badge via this URL: https://gist.githubusercontent.com/{use It is known that this action works best with relative paths (e.g. `test-results/**/*.xml`), but most absolute paths (e.g. `/tmp/test-results/**/*.xml`) require to use the composite variant -of this action (`uses: EnricoMi/publish-unit-test-result-action/composite@v2`). +of this action (`uses: im-open/publish-unit-test-result-action/composite@v2`). 
-If you have to use absolute paths with the non-composite variant of this action (`uses: EnricoMi/publish-unit-test-result-action@v2`), +If you have to use absolute paths with the non-composite variant of this action (`uses: im-open/publish-unit-test-result-action@v2`), you have to copy files to a relative path first, and then use the relative path: ```yaml @@ -797,7 +797,7 @@ you have to copy files to a relative path first, and then use the relative path: shell: bash - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action@v2 + uses: im-open/publish-unit-test-result-action@v2 if: always() with: files: | @@ -870,7 +870,7 @@ publish-test-results: path: artifacts - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action/composite@v2 + uses: im-open/publish-unit-test-result-action/composite@v2 with: files: "artifacts/**/*.xml" ``` diff --git a/action.yml b/action.yml index fa2f717c..75c6796c 100644 --- a/action.yml +++ b/action.yml @@ -1,5 +1,5 @@ name: 'Publish Test Results' -author: 'EnricoMi' +author: 'im-open' description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' inputs: @@ -148,7 +148,7 @@ outputs: runs: using: 'docker' - image: 'docker://ghcr.io/enricomi/publish-unit-test-result-action:v2.12.0' + image: 'docker://ghcr.io/im-open/publish-unit-test-result-action:v2.12.0' branding: icon: 'check-circle' diff --git a/composite/action.yml b/composite/action.yml index f9651c11..1672f4af 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -1,5 +1,5 @@ name: 'Publish Test Results' -author: 'EnricoMi' +author: 'im-open' description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' inputs: @@ -201,7 +201,7 @@ runs: continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: enricomi-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-fc884bb0b8d89fb24ccb9a84a3d97821 + key: im-open-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-fc884bb0b8d89fb24ccb9a84a3d97821 - name: Create virtualenv id: venv @@ -212,13 +212,13 @@ runs: echo '##[group]Create virtualenv' # install virtualenv, if it is not yet installed python3 -m pip install $PIP_OPTIONS virtualenv - python3 -m virtualenv enricomi-publish-action-venv + python3 -m virtualenv im-open-publish-action-venv # test activating virtualenv case "$RUNNER_OS" in Linux*|macOS*) - source enricomi-publish-action-venv/bin/activate;; + source im-open-publish-action-venv/bin/activate;; Windows*) - source enricomi-publish-action-venv\\Scripts\\activate;; + source im-open-publish-action-venv\\Scripts\\activate;; esac which python3 echo '##[endgroup]' @@ -234,9 +234,9 @@ runs: # activate virtualenv case "$RUNNER_OS" in Linux*|macOS*) - source enricomi-publish-action-venv/bin/activate;; + source im-open-publish-action-venv/bin/activate;; Windows*) - source enricomi-publish-action-venv\\Scripts\\activate;; + source im-open-publish-action-venv\\Scripts\\activate;; esac fi which python3 @@ -254,9 +254,9 @@ runs: # activate virtualenv case "$RUNNER_OS" in Linux*|macOS*) - source enricomi-publish-action-venv/bin/activate;; + source im-open-publish-action-venv/bin/activate;; Windows*) - source enricomi-publish-action-venv\\Scripts\\activate;; + source im-open-publish-action-venv\\Scripts\\activate;; esac python3 $GITHUB_ACTION_PATH/../python/publish_test_results.py echo 
'##[endgroup]' diff --git a/misc/action/fetch-workflows/action.yml b/misc/action/fetch-workflows/action.yml index f674da88..0dd85efe 100644 --- a/misc/action/fetch-workflows/action.yml +++ b/misc/action/fetch-workflows/action.yml @@ -1,5 +1,5 @@ name: 'Fetch workflows' -author: 'EnricoMi' +author: 'im-open' description: 'A GitHub Action to find workflows matching a query' outputs: @@ -16,7 +16,7 @@ runs: run: | for i in {1..60} do - workflows=$(curl -s https://github.com/EnricoMi/publish-unit-test-result-action | (grep "Used by" || true) | sed -e "s/.*title=//" -e 's/["]//g' | cut -d " " -f 1) + workflows=$(curl -s https://github.com/im-open/publish-unit-test-result-action | (grep "Used by" || true) | sed -e "s/.*title=//" -e 's/["]//g' | cut -d " " -f 1) if [ -n "$workflows" ] then echo "total=$workflows" >> $GITHUB_OUTPUT diff --git a/misc/action/find-workflows/action.yml b/misc/action/find-workflows/action.yml index 04662c96..7bbda5e4 100644 --- a/misc/action/find-workflows/action.yml +++ b/misc/action/find-workflows/action.yml @@ -1,5 +1,5 @@ name: 'Find workflows' -author: 'EnricoMi' +author: 'im-open' description: 'A GitHub Action to find workflows matching a query' inputs: diff --git a/misc/action/json-output/action.yml b/misc/action/json-output/action.yml index 8adc2147..b1957d6f 100644 --- a/misc/action/json-output/action.yml +++ b/misc/action/json-output/action.yml @@ -1,5 +1,5 @@ name: 'Assert JSON output' -author: 'EnricoMi' +author: 'im-open' description: 'A GitHub Action that asserts the publish action''s JSON output' inputs: diff --git a/misc/action/package-downloads/action.yml b/misc/action/package-downloads/action.yml index f310f340..7fafddaf 100644 --- a/misc/action/package-downloads/action.yml +++ b/misc/action/package-downloads/action.yml @@ -1,5 +1,5 @@ name: 'GHCR package downloads' -author: 'EnricoMi' +author: 'im-open' description: 'A GitHub Action that fetches the number of downloads of a GHCR package' inputs: diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 05e70d36..604ca2f6 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -440,7 +440,7 @@ def get_test_summary(): def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str: return '[{label}]({link} "{tooltip}")'.format( label=label, - link=f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols', + link=f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols', tooltip=tooltip ) diff --git a/python/publish/publisher.py b/python/publish/publisher.py index 2b5ec7c8..2cb6df42 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -206,7 +206,7 @@ def publish(self, logger.info('This action is running on a pull_request event for a fork repository. ' 'Pull request comments and check runs cannot be created, so disabling these features. 
' 'To fully run the action on fork repository pull requests, see ' - f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') + f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') if self._settings.compare_earlier: before_commit_sha = get_json_path(self._settings.event, 'before') logger.debug(f'comparing against before={before_commit_sha}') diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 9bf3dd11..95cf1b83 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -89,7 +89,7 @@ def expand_glob(pattern: Optional[str], file_format: Optional[str], gha: GithubA gha.warning(f'Could not find any{file_format} files for {prettyfied_pattern}') if has_absolute_patterns: gha.warning(f'Your file pattern contains absolute paths, please read the notes on absolute paths:') - gha.warning(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths') + gha.warning(f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths') else: logger.info(f'Reading{file_format} files {prettyfied_pattern} ({get_number_of_files(files)}, {get_files_size(files)})') logger.debug(f'reading{file_format} files {list(files)}') @@ -190,7 +190,7 @@ def parse_files(settings: Settings, gha: GithubAction) -> ParsedUnitTestResultsW elems = [] # parse files, log the progress - # https://github.com/EnricoMi/publish-unit-test-result-action/issues/304 + # https://github.com/im-open/publish-unit-test-result-action/issues/304 with progress_logger(items=len(files + junit_files + nunit_files + xunit_files + trx_files), interval_seconds=10, progress_template='Read {progress} files in {time}', @@ -235,12 +235,12 @@ def main(settings: Settings, gha: GithubAction) -> None: gha.warning(f'This action is running on a pull_request event for a fork repository. ' f'The only useful thing it can do in this situation is creating a job summary, which is disabled in settings. 
' f'To fully run the action on fork repository pull requests, see ' - f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') + f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') return # log the available RAM to help spot OOM issues: - # https://github.com/EnricoMi/publish-unit-test-result-action/issues/231 - # https://github.com/EnricoMi/publish-unit-test-result-action/issues/304 + # https://github.com/im-open/publish-unit-test-result-action/issues/231 + # https://github.com/im-open/publish-unit-test-result-action/issues/304 avail_mem = humanize.naturalsize(psutil.virtual_memory().available, binary=True) logger.info(f'Available memory to read files: {avail_mem}') diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index 26992c0c..b8e09277 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -1138,16 +1138,16 @@ def test_parse_files_no_matches(self): gha.warning.assert_has_calls([ mock.call(f'Could not find any JUnit XML files for {missing_junit}'), mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), - mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any NUnit XML files for {missing_nunit}'), mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), - mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any XUnit XML files for {missing_xunit}'), mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), - mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), mock.call(f'Could not find any TRX files for {missing_trx}'), mock.call(f'Your file pattern contains absolute paths, please read the notes on absolute paths:'), - mock.call(f'https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), + mock.call(f'https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths'), ]) gha.error.assert_not_called() @@ -1248,7 +1248,7 @@ def do_raise(*args): mock.call('This action is running on a pull_request event for a fork repository. ' 'The only useful thing it can do in this situation is creating a job summary, ' 'which is disabled in settings. 
To fully run the action on fork repository pull requests, ' - f'see https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}' + f'see https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}' '/README.md#support-fork-repositories-and-dependabot-branches'), mock.call('At least one of the FILES, JUNIT_FILES, NUNIT_FILES, XUNIT_FILES, ' 'or TRX_FILES options has to be set! ' diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index c403a1bd..80b42214 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -453,7 +453,7 @@ def test_publish_with_fork(self): mock.call('This action is running on a pull_request event for a fork repository. ' 'Pull request comments and check runs cannot be created, so disabling these features. ' 'To fully run the action on fork repository pull requests, ' - f'see https://github.com/EnricoMi/publish-unit-test-result-action/blob/{__version__}' + f'see https://github.com/im-open/publish-unit-test-result-action/blob/{__version__}' '/README.md#support-fork-repositories-and-dependabot-branches') ], l.info.call_args_list)