Feat: add pytest job to _Test-OCI-Factory workflow #224

Merged · 16 commits · Aug 30, 2024
Changes from 8 commits
46 changes: 46 additions & 0 deletions .github/workflows/_Test-OCI-Factory.yaml
@@ -15,7 +15,53 @@ on:
- "!src/workflow-engine/**"
- "!src/cli-client/**"

env:
  # path of the pytest JUnit output
  PYTEST_RESULT_PATH: pytest_results.xml


jobs:

  pytest:
    # Trigger Python unit tests across the repository
    name: pytest
    runs-on: ubuntu-22.04
    steps:

      # Job setup
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - uses: actions/setup-python@v5
        with:
          python-version: "3.x"

      # Note: add additional dependency installation lines below as required
      # test-oci-factory/pytest requirements
      - run: pip install -r tests/etc/requirements.txt

      - name: Run pytest
        continue-on-error: true
        run: |
          python3 -m pytest --junit-xml "${{ env.PYTEST_RESULT_PATH }}"

      - name: Generate Summary
        if: ${{ !cancelled() }}
        run: |
          python3 -m tools.junit_to_markdown --input-junit "${{ env.PYTEST_RESULT_PATH }}" >> $GITHUB_STEP_SUMMARY

      - name: Upload pytest Result
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ env.PYTEST_RESULT_PATH }}
          path: ${{ env.PYTEST_RESULT_PATH }}
          if-no-files-found: error

  test-workflows:
    name: Trigger internal tests for mock-rock
    uses: ./.github/workflows/Image.yaml
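Taken together, the new steps boil down to: run pytest with a JUnit XML output, convert that XML into markdown for the step summary, and upload the XML as an artifact. A minimal local sketch of the same flow, assuming it is run from the repository root (the subprocess wrapper is illustrative only, not part of this PR):

```python
# Sketch: reproduce the workflow's pytest -> summary flow locally.
# Assumes the repository root as the working directory; the variable
# mirrors the PYTEST_RESULT_PATH env var from the workflow.
import subprocess
import sys

PYTEST_RESULT_PATH = "pytest_results.xml"

# "Run pytest": write a JUnit XML report; a non-zero exit is tolerated,
# mirroring continue-on-error: true in the job.
subprocess.run(
    [sys.executable, "-m", "pytest", "--junit-xml", PYTEST_RESULT_PATH],
    check=False,
)

# "Generate Summary": render the report as markdown (stdout here,
# appended to $GITHUB_STEP_SUMMARY in the job).
subprocess.run(
    [sys.executable, "-m", "tools.junit_to_markdown", "--input-junit", PYTEST_RESULT_PATH],
    check=True,
)
```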
3 changes: 3 additions & 0 deletions tests/__init__.py
@@ -0,0 +1,3 @@
from pathlib import Path

DATA_DIR = Path(__file__).parent / "data"
18 changes: 18 additions & 0 deletions tests/data/junit_xml_failure.xml
@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<testsuites>
  <testsuite name="pytest" errors="0" failures="1" skipped="0" tests="6" time="0.127" timestamp="2024-08-13T11:57:19.793644+00:00" hostname="fv-az1756-954">
    <testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_get_target_archs" time="0.001" />
    <testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_configure_matrices" time="0.000" />
    <testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_configure_matrices_fallback_exception" time="0.000" />
    <testcase classname="oci-factory.src.build-rock.configure.test.test_configure_matrices_lpci_fallback" name="test_configure_matrices_lpci_fallback" time="0.000" />
    <testcase classname="oci-factory.src.build-rock.configure.test.test_generate_matrix" name="test_set_build_config_outputs" time="0.001" />
    <testcase classname="oci-factory.src.docs.test.test_generate_oci_doc_yaml" name="test_example_failure" time="0.001">
      <failure message="AssertionError: This is to exemplify the output of a failed unit test&#10;assert False">def test_example_failure():
&gt; assert False, "This is to exemplify the output of a failed unit test"
E AssertionError: This is to exemplify the output of a failed unit test
E assert False
oci-factory/src/docs/test/test_generate_oci_doc_yaml.py:8: AssertionError
      </failure>
    </testcase>
  </testsuite>
</testsuites>
1 change: 1 addition & 0 deletions tests/etc/requirements.txt
@@ -0,0 +1 @@
pytest==8.3.2
Empty file added tests/fixtures/__ini__.py
Empty file.
9 changes: 9 additions & 0 deletions tests/fixtures/buffers.py
@@ -0,0 +1,9 @@
import pytest
from io import StringIO


@pytest.fixture
def str_buff():
    """String IO fixture for simulating a file object"""
    with StringIO() as buffer:
        yield buffer
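For context, a hypothetical test consuming this fixture would look like the sketch below (not part of the PR; the fixture yields a StringIO that is closed again once the test finishes):

```python
# Hypothetical consumer of the str_buff fixture (illustration only).
def test_str_buff_roundtrip(str_buff):
    str_buff.write("hello")
    str_buff.seek(0)
    assert str_buff.read() == "hello"
```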
13 changes: 13 additions & 0 deletions tests/fixtures/junit_et.py
@@ -0,0 +1,13 @@
import pytest
import xml.etree.ElementTree as ET
from .. import DATA_DIR


@pytest.fixture
def junit_with_failure():
    """Load the ElementTree of a JUnit XML report that contains a failure"""
    sample = DATA_DIR / "junit_xml_failure.xml"

    tree = ET.parse(sample)
    root = tree.getroot()
    return root
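A hypothetical test built on this fixture (not in the PR) could assert against the sample data above, whose single testsuite records one failure out of six tests:

```python
# Hypothetical consumer of the junit_with_failure fixture (illustration only).
# The fixture returns the <testsuites> root, so the suite is one level down.
def test_sample_reports_one_failure(junit_with_failure):
    suite = junit_with_failure.find("testsuite")
    assert suite.attrib["failures"] == "1"
    assert suite.attrib["tests"] == "6"
```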
Empty file added tests/integration/__init__.py
Empty file.
18 changes: 18 additions & 0 deletions tests/integration/test_convert_junit_xml_to_markdown.py
@@ -0,0 +1,18 @@
from ..fixtures.buffers import str_buff
from ..fixtures.junit_et import junit_with_failure
import tools.junit_to_markdown.convert as report


def test_print_redirection(junit_with_failure, str_buff, capsys):
    """Ensure that the report is entirely redirected when needed"""

    report.print_junit_report(junit_with_failure, str_buff)
    report.print_junit_report(junit_with_failure, None)  # print report to stdout

    str_buff.seek(0)
    str_buff_content = str_buff.read()

    captured = capsys.readouterr()
    stdout_content = captured.out

    assert stdout_content == str_buff_content, "Report differs between stdout and buffer."
Empty file added tools/junit_to_markdown/__init__.py
Empty file.
25 changes: 25 additions & 0 deletions tools/junit_to_markdown/__main__.py
@@ -0,0 +1,25 @@
import argparse
import sys
import xml.etree.ElementTree as ET

from .convert import print_junit_report


parser = argparse.ArgumentParser(
    description="Generate markdown from a JUnit XML report for $GITHUB_STEP_SUMMARY"
)

parser.add_argument(
    "--input-junit", help="Path to JUnit XML Report", required=True, type=str
)


def main():
    args = parser.parse_args()

    tree = ET.parse(args.input_junit)
    root = tree.getroot()

    print_junit_report(root, sys.stdout)


if __name__ == "__main__":
    main()
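Besides this CLI entry point, the converter can also be driven programmatically. A minimal sketch, assuming a pytest_results.xml produced by the workflow step above:

```python
# Sketch: call the converter directly instead of via `python3 -m`.
import sys
import xml.etree.ElementTree as ET

from tools.junit_to_markdown.convert import print_junit_report

root = ET.parse("pytest_results.xml").getroot()  # assumed report path
print_junit_report(root, sys.stdout)
```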
132 changes: 132 additions & 0 deletions tools/junit_to_markdown/convert.py
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import json
import xml.etree.ElementTree as ET
from io import TextIOBase


def print_element(element: ET.Element, output: TextIOBase = None):
    """Generically display the attributes and text of an element"""
    print("<pre>", file=output)

    for key, value in element.attrib.items():
        print(f"{key}: {value}", file=output)

    if element.text is not None:
        if content := element.text.strip():
            print(f"text: \n{content}", file=output)

    print("</pre>", file=output)


def print_testsuite_pie_chart(testsuite: ET.Element, output: TextIOBase = None):
    """Generate a pie chart showing test status from a testsuite element"""

    failed_tests = int(testsuite.attrib.get("failures", 0))
    error_tests = int(testsuite.attrib.get("errors", 0))
    skipped_tests = int(testsuite.attrib.get("skipped", 0))
    total_tests = int(testsuite.attrib.get("tests", 0))

    # the passed count is not recorded in the report, so infer it
    pass_tests = total_tests - failed_tests - error_tests - skipped_tests

    # disable the black autoformatter for a moment
    # fmt: off

    # name, value, colour, default_order
    chart_data = [
        ("failed",  failed_tests,  "#f00", 1),
        ("error",   error_tests,   "#fa0", 2),
        ("skipped", skipped_tests, "#ff0", 3),
        ("pass",    pass_tests,    "#0f0", 4),
    ]
    # note: default_order keeps colours stable if two wedges have the exact same value
    # fmt: on

    # filter out wedges with zero width
    chart_data = list(filter(lambda w: w[1] != 0, chart_data))

    # sort by value, then default order, so colours match the wedge sizes
    chart_data = list(sorted(chart_data, key=lambda w: (w[1], w[3]), reverse=True))

    # create the chart theme, assigning pie1..pieN colours in the sorted wedge order
    theme_dict = {
        "theme": "base",
        "themeVariables": {f"pie{n+1}": w[2] for n, w in enumerate(chart_data)},
    }

    # begin printing the pie chart...
    print("```mermaid", file=output)

    # Note: the init directive cannot be wrapped in quotes
    print(f"%%{{init:{json.dumps(theme_dict)}}}%%", file=output)

    print("pie", file=output)
    for key, value, _, _ in chart_data:
        print(f'"{key}" : {value}', file=output)

    print("```", file=output)


def get_testcase_status(testcase: ET.Element):
    """Get the status emoji for an individual testcase element"""

    if testcase.find("failure") is not None:
        return ":x:"
    elif testcase.find("error") is not None:
        return ":warning:"
    elif testcase.find("skipped") is not None:
        return ":information_source:"
    else:  # passed
        return ":white_check_mark:"


def print_header(testsuite: ET.Element, output: TextIOBase = None):
    """Print a header for the summary"""
    passed = (
        testsuite.attrib.get("failures") == "0"
        and testsuite.attrib.get("errors") == "0"
    )
    status = ":white_check_mark:" if passed else ":x:"
    name = testsuite.attrib["name"]

    print(f"# {status} {name}", file=output)


def print_testsuite_report(testsuite: ET.Element, output: TextIOBase = None):
    """Print a complete report for one testsuite element"""

    print_header(testsuite, output)

    # lead with the pie chart as the headline figure
    print_testsuite_pie_chart(testsuite, output)

    # print testsuite info
    print_element(testsuite, output)

    # print each test case in a collapsible section
    for testcase in testsuite.findall("testcase"):

        print("<details>", file=output)

        test_status = get_testcase_status(testcase)
        test_name = (
            testcase.attrib["name"].replace("_", " ").title()
        )  # make the title look better
        test_class = testcase.attrib["classname"]
        print(
            f"<summary>{test_status} {test_name} - {test_class}</summary>", file=output
        )

        # iter() yields the testcase element itself first, then its descendants
        for child in testcase.iter():
            print(f"<i>{child.tag}</i>", file=output)
            print_element(child, output)

        print("</details>", file=output)


def print_junit_report(root: ET.Element, output: TextIOBase = None):
    """Print the report by iterating over all <testsuite> elements in root"""

    for testsuite in root.findall("testsuite"):
        print_testsuite_report(testsuite, output)
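To make the theming concrete: for the sample report in tests/data (six tests, one failure), the sketch below renders the chart. The largest wedge ("pass", 5) sorts first and receives pie1 = #0f0; "failed" gets pie2 = #f00. This snippet is an illustration, not part of the PR:

```python
# Sketch: render the pie chart for the bundled sample report
# (assumes the repository root as the working directory).
import sys
import xml.etree.ElementTree as ET

from tools.junit_to_markdown.convert import print_testsuite_pie_chart

root = ET.parse("tests/data/junit_xml_failure.xml").getroot()
print_testsuite_pie_chart(root.find("testsuite"), sys.stdout)
```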