surface xfail (#4765)
What is the motivation for this PR?
With the xfail marker added, a real success cannot be distinguished from an xfail success. This PR improves the test report function to include xfail results.

How did you do it?
Add 4 new result types to the test case result: xfail_success, xfail_failure, xfail_error, and xfail_skipped. Also add one new field, xfails, to the test report summary.
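As a rough sketch (not the repository's actual function), the new xfails counter in the summary aggregates any of the four new result types; the sample cases below are purely illustrative:

# Minimal sketch of the new counting; the parsed cases are made up for illustration.
from collections import defaultdict

XFAIL_RESULTS = {"xfail_success", "xfail_failure", "xfail_error", "xfail_skipped"}

sample_cases = [
    {"result": "success", "error": False},
    {"result": "xfail_failure", "error": False},
    {"result": "xfail_success", "error": False},
]

summary = defaultdict(int)
for case in sample_cases:
    summary["tests"] += 1
    summary["failures"] += case["result"] in ("failure", "error")
    summary["skipped"] += case["result"] == "skipped"
    summary["errors"] += case["error"]
    summary["xfails"] += case["result"] in XFAIL_RESULTS

print(dict(summary))
# {'tests': 3, 'failures': 0, 'skipped': 0, 'errors': 0, 'xfails': 2}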

How did you verify/test it?
Ran tests to generate test reports covering different combinations of xfail cases.
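A minimal, self-contained way to generate a report containing both regular and xfail cases (assuming plain pytest; the actual sonic-mgmt invocation may differ, and the "xfail" property read by the parser is emitted by the conditional_mark plugin changed below, not by stock pytest):

# test_xfail_demo.py -- a hypothetical test file used only to produce sample results.
import pytest

@pytest.mark.xfail(reason="demo expected failure", strict=False)
def test_expected_failure():
    assert False

def test_real_success():
    assert True

# Generate the JUnit XML report with: pytest test_xfail_demo.py --junitxml=report.xml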
StormLiangMS authored Nov 26, 2021
1 parent 54dec75 commit f432e55
Showing 4 changed files with 34 additions and 13 deletions.
38 changes: 28 additions & 10 deletions test_reporting/junit_xml_parser.py
@@ -52,8 +52,8 @@

# Fields found in the metadata/properties section of the JUnit XML file.
# FIXME: These are specific to pytest, needs to be extended to support spytest.
METADATA_TAG = "properties"
METADATA_PROPERTY_TAG = "property"
PROPERTIES_TAG = "properties"
PROPERTY_TAG = "property"
REQUIRED_METADATA_PROPERTIES = [
"topology",
"testbed",
@@ -238,13 +238,13 @@ def _validate_test_metadata(root):


def _validate_test_metadata(root):
properties_element = root.find("properties")
properties_element = root.find(PROPERTIES_TAG)

if not properties_element:
return

seen_properties = []
for prop in properties_element.iterfind(METADATA_PROPERTY_TAG):
for prop in properties_element.iterfind(PROPERTY_TAG):
property_name = prop.get("name", None)

if not property_name:
@@ -320,24 +320,31 @@ def _extract_test_summary(test_cases):
test_result_summary = defaultdict(int)
for _, cases in test_cases.items():
for case in cases:
# An error may occur along with other test results, so errors are counted separately.
# The result field is unique per test case: it holds either error or failure, not both.
# xfails is the counter for all xfail result kinds (success/failure/error/skipped).
test_result_summary["tests"] += 1
test_result_summary["failures"] += case["result"] == "failure" or case["result"] == "error"
test_result_summary["skipped"] += case["result"] == "skipped"
test_result_summary["errors"] += case["error"]
test_result_summary["time"] += float(case["time"])
test_result_summary["xfails"] += case["result"] == "xfail_failure" or \
case["result"] == "xfail_error" or \
case["result"] == "xfail_skipped" or \
case["result"] == "xfail_success"

test_result_summary = {k: str(v) for k, v in test_result_summary.items()}
return test_result_summary


def _parse_test_metadata(root):
properties_element = root.find(METADATA_TAG)
properties_element = root.find(PROPERTIES_TAG)

if not properties_element:
return {}

test_result_metadata = {}
for prop in properties_element.iterfind("property"):
for prop in properties_element.iterfind(PROPERTY_TAG):
if prop.get("value"):
test_result_metadata[prop.get("name")] = prop.get("value")

@@ -362,23 +369,34 @@ def _parse_test_case(test_case):
error = test_case.find("error")
skipped = test_case.find("skipped")

# Any test marked as xfail emits an "xfail" property into the report XML file.
# Add the prefix "xfail_" to the results of tests that are marked with xfail.
properties_element = test_case.find(PROPERTIES_TAG)
xfail_case = ""
if properties_element:
for prop in properties_element.iterfind(PROPERTY_TAG):
if prop.get("name") == "xfail":
xfail_case = "xfail_"
break

# NOTE: "error" is unique in that it can occur alongside a succesful, failed, or skipped test result.
# Because of this, we track errors separately so that the error can be correlated with the stage it
# occurred.
# By looking into test results from past 300 days, error only occur with skipped test result.
#
# If there is *only* an error tag we note that as well, as this indicates that the framework
# errored out during setup or teardown.
if failure is not None:
result["result"] = "failure"
result["result"] = "{}failure".format(xfail_case)
summary = failure.get("message", "")
elif skipped is not None:
result["result"] = "skipped"
result["result"] = "{}skipped".format(xfail_case)
summary = skipped.get("message", "")
elif error is not None:
result["result"] = "error"
result["result"] = "{}error".format(xfail_case)
summary = error.get("message", "")
else:
result["result"] = "success"
result["result"] = "{}success".format(xfail_case)
summary = ""

result["summary"] = summary[:min(len(summary), MAXIMUM_SUMMARY_SIZE)]
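As a standalone illustration of the classification added above (a sketch, not the repository's parser itself), a test case element carrying an "xfail" property gets its result prefixed with "xfail_"; the sample XML below is hypothetical:

import xml.etree.ElementTree as ET

sample = """
<testcase classname="test_example" name="test_case" time="1.5">
  <properties>
    <property name="xfail" value="False"/>
  </properties>
  <skipped message="expected failure"/>
</testcase>
"""

case = ET.fromstring(sample)

# Prefix the result with "xfail_" if an xfail property is present.
prefix = ""
properties = case.find("properties")
if properties is not None:
    if any(prop.get("name") == "xfail" for prop in properties.iterfind("property")):
        prefix = "xfail_"

# Same precedence as the parser: failure, then skipped, then error, else success.
if case.find("failure") is not None:
    result = prefix + "failure"
elif case.find("skipped") is not None:
    result = prefix + "skipped"
elif case.find("error") is not None:
    result = prefix + "error"
else:
    result = prefix + "success"

print(result)  # xfail_skipped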
4 changes: 2 additions & 2 deletions test_reporting/kusto/flat_views.kql
@@ -5,8 +5,8 @@
{
TestReportMetadata
| join kind=innerunique TestReportSummary on ReportId
| project Timestamp, OSVersion, HardwareSku, TotalCasesRun, Successes=(TotalCasesRun - Failures - Skipped),
Failures, Errors, Skipped, TestbedName, TrackingId, TotalRuntime,
| project Timestamp, OSVersion, HardwareSku, TotalCasesRun, Successes=(TotalCasesRun - Failures - Skipped - Xfails),
Failures, Errors, Skipped, Xfails, TestbedName, TrackingId, TotalRuntime,
AsicType, Platform, Topology, ReportId, UploadTimestamp
| sort by Timestamp desc
}
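Under the updated view, successes are computed by also subtracting the new Xfails column. With illustrative values TotalCasesRun = 100, Failures = 5, Skipped = 10, and Xfails = 3:

Successes = 100 - 5 - 10 - 3 = 82

so cases that ran under an xfail marker are surfaced in their own column instead of being folded into the plain success count.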
3 changes: 2 additions & 1 deletion test_reporting/kusto/setup.kql
@@ -55,13 +55,14 @@
# 2. Add a JSON mapping for the table #
###############################################################################
.create table TestReportSummary (ReportId: string, TotalCasesRun: int, Failures: int,
Errors: int, Skipped: int, TotalRuntime: double)
Errors: int, Skipped: int, Xfails: int, TotalRuntime: double)

.create table TestReportSummary ingestion json mapping 'FlatSummaryMappingV1' '[{"column":"ReportId","Properties":{"path":"$.id"}},
{"column":"TotalCasesRun","Properties":{"path":"$.tests"}},
{"column":"Failures","Properties":{"path":"$.failures"}},
{"column":"Errors","Properties":{"path":"$.errors"}},
{"column":"Skipped","Properties":{"path":"$.skipped"}},
{"column":"Xfails","Properties":{"path":"$.xfails"}},
{"column":"TotalRuntime","Properties":{"path":"$.time"}}]'

###############################################################################
2 changes: 2 additions & 0 deletions tests/common/plugins/conditional_mark/__init__.py
Expand Up @@ -299,6 +299,8 @@ def pytest_collection_modifyitems(session, config, items):
if mark_name == 'xfail':
strict = mark_details.get('strict', False)
mark = getattr(pytest.mark, mark_name)(reason=reason, strict=strict)
# Record an xfail property so that it is written to the report XML file
item.user_properties.append(('xfail', strict))
else:
mark = getattr(pytest.mark, mark_name)(reason=reason)

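Outside the sonic-mgmt plugin, the same mechanism can be sketched in a minimal conftest.py (an illustration, not the repository's plugin): pytest's junitxml plugin writes entries from item.user_properties as <property> elements under the test case, which is exactly the "xfail" property the parser above looks for.

# conftest.py -- a minimal sketch, not the sonic-mgmt conditional_mark plugin.
import pytest

def pytest_collection_modifyitems(session, config, items):
    for item in items:
        # For illustration only: mark every collected test as xfail and record
        # the property the report parser keys on; run with --junitxml=report.xml.
        item.add_marker(pytest.mark.xfail(reason="example", strict=False))
        item.user_properties.append(("xfail", False))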
