Feat: add pytest job to _Test-OCI-Factory workflow #224

Merged · 16 commits · Aug 30, 2024
51 changes: 51 additions & 0 deletions .github/workflows/_Test-OCI-Factory.yaml
@@ -15,7 +15,58 @@ on:
- "!src/workflow-engine/**"
- "!src/cli-client/**"

env:
# local path to clone the oci-factory to
OCI_FACTORY_DIR: oci-factory/

# path of pytest junit output
PYTEST_RESULT_PATH: pytest_results.xml


jobs:

pytest:
# Trigger python unit tests across the repository
name: pytest
runs-on: ubuntu-22.04
steps:

# Job Setup
- uses: actions/checkout@v4
with:
path: ${{ env.OCI_FACTORY_DIR }}
fetch-depth: 1

- uses: actions/setup-python@v5
with:
python-version: "3.x"

# Note: add additional dependency installation lines below as required
# test-oci-factory/pytest requirements
- run: pip install -r ${{ env.OCI_FACTORY_DIR }}/src/test-oci-factory/pytest/requirements.txt


- name: Run pytest
continue-on-error: true
run: |
python3 -m pytest --junit-xml "${{ env.PYTEST_RESULT_PATH }}" "${{ env.OCI_FACTORY_DIR }}"

- name: Generate Summary
if: always()
run: |
python3 ${{ env.OCI_FACTORY_DIR }}/src/test-oci-factory/pytest/format_markdown_report.py --input-junit "${{ env.PYTEST_RESULT_PATH }}" >> $GITHUB_STEP_SUMMARY

- name: Upload pytest Result
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ env.PYTEST_RESULT_PATH }}
path: ${{ env.PYTEST_RESULT_PATH }}
if-no-files-found: error




test-workflows:
name: Trigger internal tests for mock-rock
uses: ./.github/workflows/Image.yaml
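For local debugging, a rough equivalent of the job's two main steps is sketched below in Python. This snippet is not part of the diff; it assumes the repository has been cloned into ./oci-factory (mirroring OCI_FACTORY_DIR) and that the requirements file has already been installed.

```python
#!/usr/bin/env python3
"""Sketch of a local equivalent of the new pytest job (illustrative, not part of the PR)."""
import subprocess
import sys
from pathlib import Path

OCI_FACTORY_DIR = Path("oci-factory")            # assumed clone location, as in the env block
PYTEST_RESULT_PATH = Path("pytest_results.xml")  # mirrors PYTEST_RESULT_PATH

# Mirror the "Run pytest" step: write a JUnit XML report, keep going even on failures.
pytest_rc = subprocess.run(
    [sys.executable, "-m", "pytest", "--junit-xml", str(PYTEST_RESULT_PATH), str(OCI_FACTORY_DIR)]
).returncode

# Mirror the "Generate Summary" step: render the markdown report
# (in CI the output is appended to $GITHUB_STEP_SUMMARY).
report_script = OCI_FACTORY_DIR / "src/test-oci-factory/pytest/format_markdown_report.py"
subprocess.run(
    [sys.executable, str(report_script), "--input-junit", str(PYTEST_RESULT_PATH)],
    check=True,
)

sys.exit(pytest_rc)  # surface the pytest status locally
```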
18 changes: 18 additions & 0 deletions src/test-oci-factory/pytest/data/sample_failure.xml
@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite name="pytest" errors="0" failures="1" skipped="0" tests="6" time="0.127" timestamp="2024-08-13T11:57:19.793644+00:00" hostname="fv-az1756-954">
<testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_get_target_archs" time="0.001" />
<testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_configure_matrices" time="0.000" />
<testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_configure_matrices_fallback_exception" time="0.000" />
<testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_configure_matrices_lpci_fallback" time="0.000" />
<testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_set_build_config_outputs" time="0.001" />
<testcase classname="oci-factory.src.docs.test.test_generate_oci_doc_yaml" name="test_example_failure" time="0.001">
<failure message="AssertionError: This is to exemplify the output of a failed unit test&#10;assert False">def test_example_failure():
&gt; assert False, "This is to exemplify the output of a failed unit test"
E AssertionError: This is to exemplify the output of a failed unit test
E assert False
oci-factory/src/docs/test/test_generate_oci_doc_yaml.py:8: AssertionError
</failure>
</testcase>
</testsuite>
</testsuites>
148 changes: 148 additions & 0 deletions src/test-oci-factory/pytest/format_markdown_report.py
@@ -0,0 +1,148 @@
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
from io import TextIOBase
import json

"""
Generate markdown from a JUnit XML report for $GITHUB_STEP_SUMMARY
"""


def print_element(element: ET.Element, output: TextIOBase = None):
"""Generically display attrs and text of a element"""
print(f"<pre>", file=output)

for key, value in element.attrib.items():
print(f"{key}: {value}", file=output)

if element.text is not None:
if content := element.text.strip():
print(f"text: \n{content}", file=output)

print(f"</pre>", file=output)


def print_testsuite_pie_chart(testsuite: ET.Element, output: TextIOBase = None):
"""Generate a pie chart showing test status from testsuite element"""

failed_tests = int(testsuite.attrib.get("failures", 0))
error_tests = int(testsuite.attrib.get("errors", 0))
skipped_tests = int(testsuite.attrib.get("skipped", 0))
total_tests = int(testsuite.attrib.get("tests", 0))

# passed test has to be inferred
pass_tests = total_tests - failed_tests - error_tests - skipped_tests

# name, value, colour, default_order
chart_data = [
("failed", failed_tests, "#f00", 1),
("error", error_tests, "#fa0", 2),
("skipped", skipped_tests, "#ff0", 3),
("pass", pass_tests, "#0f0", 4),
]
# note: default_order ensures color match if two wedges have the exact same value

# filter out wedges with 0 width
chart_data = list(filter(lambda w: w[1] != 0, chart_data))

# sort by value, then default order so colors match what we expect
chart_data = list(sorted(chart_data, key=lambda w: (w[1], w[3]), reverse=True))

# create the chart theme
theme_dict = {
"theme": "base",
"themeVariables": {f"pie{n+1}": w[2] for n, w in enumerate(chart_data)},
}

# begin printing pie chart...
print("```mermaid", file=output)

# theme colours are assigned pie1..pieN following the sorted chart_data order above
# Note: init cannot be in quotes
print(f"%%{{init:{json.dumps(theme_dict)}}}%%", file=output)

print(f"pie", file=output)
for key, value, _, _ in chart_data:
print(f'"{key}" : {value}', file=output)

print("```", file=output)


def get_testcase_status(testcase: ET.Element):
"""Get status for individual testcase elements"""

if testcase.find("failure") is not None:
return ":x:"
elif testcase.find("error") is not None:
return ":warning:"
elif testcase.find("skipped") is not None:
return ":information_source:"
else: # passed
return ":white_check_mark:"


def print_header(testsuite: ET.Element, output: TextIOBase = None):
"""Print a header for the summary"""
passed = (
testsuite.attrib.get("failures") == "0"
and testsuite.attrib.get("errors") == "0"
)
status = ":white_check_mark:" if passed else ":x:"
name = testsuite.attrib["name"]

print(f"# {status} {name}", file=output)


def print_testsuite_report(testsuite: ET.Element, output: TextIOBase = None):
"""Print complete testsuite element Report"""

print_header(testsuite, output)

# use pie chart header as title
print_testsuite_pie_chart(testsuite, output)

# print testsuite info
print_element(testsuite, output)

# print each test case in a collapsible section
for testcase in testsuite.findall("testcase"):

print("<details>", file=output)

test_status = get_testcase_status(testcase)
test_name = (
testcase.attrib["name"].replace("_", " ").title()
) # make the title look better
test_class = testcase.attrib["classname"]
print(
f"<summary>{test_status} {test_name} - {test_class}</summary>", file=output
)

for child in testcase.iter():
print(f"<i>{child.tag}</i>", file=output)
print_element(child, output)

print("</details>", file=output)


def print_junit_report(root: ET.Element, output: TextIOBase = None):
"""Print report by iterating over all <testsuite> elements in root"""

for testsuite in root.findall("testsuite"):
print_testsuite_report(testsuite, output)


if __name__ == "__main__":
import argparse, sys

parser = argparse.ArgumentParser()

parser.add_argument(
"--input-junit", help="Path to JUnit XML Report", required=True, type=str
)

args = parser.parse_args()

tree = ET.parse(args.input_junit)
root = tree.getroot()
print_junit_report(root, sys.stdout)
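As a reference point (not part of the diff), the wedge filtering, ordering, and theme assignment in print_testsuite_pie_chart behave as follows for the counts in data/sample_failure.xml (tests=6, failures=1, errors=0, skipped=0):

```python
# Illustrative sketch of print_testsuite_pie_chart's ordering logic.
import json

chart_data = [
    ("failed", 1, "#f00", 1),
    ("error", 0, "#fa0", 2),
    ("skipped", 0, "#ff0", 3),
    ("pass", 5, "#0f0", 4),  # inferred: 6 - 1 - 0 - 0
]

# drop empty wedges, then sort by value (default order breaks ties)
chart_data = [w for w in chart_data if w[1] != 0]
chart_data.sort(key=lambda w: (w[1], w[3]), reverse=True)

theme_dict = {
    "theme": "base",
    "themeVariables": {f"pie{n + 1}": w[2] for n, w in enumerate(chart_data)},
}
print(json.dumps(theme_dict))
# {"theme": "base", "themeVariables": {"pie1": "#0f0", "pie2": "#f00"}}
# i.e. the largest wedge (pass) is green, the failed wedge is red.
```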
1 change: 1 addition & 0 deletions src/test-oci-factory/pytest/requirements.txt
@@ -0,0 +1 @@
pytest==8.3.2
41 changes: 41 additions & 0 deletions src/test-oci-factory/pytest/test_format_markdown_report.py
@@ -0,0 +1,41 @@
#!/usr/bin/env python3

import pytest
import xml.etree.ElementTree as ET
from pathlib import Path
from io import StringIO

import format_markdown_report as report


@pytest.fixture
def sample_failure():
"""Load ET of sample junit xml report with failure"""
sample_path = Path(__file__).parent / "data/sample_failure.xml"

tree = ET.parse(sample_path)
root = tree.getroot()
return root


@pytest.fixture
def str_buff():
"""String IO fixture for simulating a file object"""
with StringIO() as buffer:
yield buffer


def test_print_redirection(sample_failure, str_buff, capsys):
"""Ensure that the report is entirely redirected when needed"""

report.print_junit_report(sample_failure, str_buff)
report.print_junit_report(sample_failure, None) # print report to stdout

str_buff.seek(0)
str_buff_content = str_buff.read()

captured = capsys.readouterr()
stdout_content = captured.out

assert stdout_content == str_buff_content, "Printing to multiple locations."

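To exercise just this test module locally, something along the following lines should work (a sketch, assuming the requirements.txt dependencies are installed and the command is run from the repository root):

```python
# Illustrative only: run the report-formatter tests and exit with pytest's status.
import sys
import pytest

sys.exit(pytest.main(["-x", "src/test-oci-factory/pytest/test_format_markdown_report.py"]))
```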