From f432e5590d2c820f6138dab475afc0fdeb434fcc Mon Sep 17 00:00:00 2001
From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com>
Date: Thu, 25 Nov 2021 21:50:59 -0800
Subject: [PATCH] surface xfail (#4765)

What is the motivation for this PR?
With the xfail marker added, the report cannot tell a real success from an xfail success. This PR improves the test report function to surface xfail results.

How did you do it?
Add 4 new result types for test cases: xfail_success, xfail_failure, xfail_error and xfail_skipped. Also add one new field, xfails, to the test report summary.

How did you verify/test it?
Ran tests to generate test reports covering different combinations of xfail cases.
---
 test_reporting/junit_xml_parser.py               | 38 ++++++++++++++-----
 test_reporting/kusto/flat_views.kql              |  4 +-
 test_reporting/kusto/setup.kql                   |  3 +-
 .../plugins/conditional_mark/__init__.py         |  2 +
 4 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/test_reporting/junit_xml_parser.py b/test_reporting/junit_xml_parser.py
index 9a368ff1c9..0af4648d2e 100644
--- a/test_reporting/junit_xml_parser.py
+++ b/test_reporting/junit_xml_parser.py
@@ -52,8 +52,8 @@
 
 # Fields found in the metadata/properties section of the JUnit XML file.
 # FIXME: These are specific to pytest, needs to be extended to support spytest.
-METADATA_TAG = "properties"
-METADATA_PROPERTY_TAG = "property"
+PROPERTIES_TAG = "properties"
+PROPERTY_TAG = "property"
 REQUIRED_METADATA_PROPERTIES = [
     "topology",
     "testbed",
@@ -238,13 +238,13 @@ def _validate_test_summary(root):
 
 
 def _validate_test_metadata(root):
-    properties_element = root.find("properties")
+    properties_element = root.find(PROPERTIES_TAG)
 
     if not properties_element:
         return
 
     seen_properties = []
-    for prop in properties_element.iterfind(METADATA_PROPERTY_TAG):
+    for prop in properties_element.iterfind(PROPERTY_TAG):
         property_name = prop.get("name", None)
 
         if not property_name:
@@ -320,24 +320,31 @@ def _extract_test_summary(test_cases):
     test_result_summary = defaultdict(int)
     for _, cases in test_cases.items():
         for case in cases:
+            # An error may occur alongside another test result, so errors are counted separately.
+            # The result field holds a single value per test case; both "failure" and "error" count as failures.
+            # xfails counts every kind of xfail result (success/failure/error/skipped).
             test_result_summary["tests"] += 1
             test_result_summary["failures"] += case["result"] == "failure" or case["result"] == "error"
             test_result_summary["skipped"] += case["result"] == "skipped"
             test_result_summary["errors"] += case["error"]
             test_result_summary["time"] += float(case["time"])
+            test_result_summary["xfails"] += case["result"] == "xfail_failure" or \
+                                             case["result"] == "xfail_error" or \
+                                             case["result"] == "xfail_skipped" or \
+                                             case["result"] == "xfail_success"
 
     test_result_summary = {k: str(v) for k, v in test_result_summary.items()}
 
     return test_result_summary
 
 
 def _parse_test_metadata(root):
-    properties_element = root.find(METADATA_TAG)
+    properties_element = root.find(PROPERTIES_TAG)
     if not properties_element:
         return {}
 
     test_result_metadata = {}
 
-    for prop in properties_element.iterfind("property"):
+    for prop in properties_element.iterfind(PROPERTY_TAG):
         if prop.get("value"):
             test_result_metadata[prop.get("name")] = prop.get("value")
@@ -362,23 +369,34 @@ def _parse_test_case(test_case):
     error = test_case.find("error")
     skipped = test_case.find("skipped")
 
+    # Any test marked as xfail emits an "xfail" property into the report XML file.
+    # Prefix the results of such tests with "xfail_".
+    properties_element = test_case.find(PROPERTIES_TAG)
+    xfail_case = ""
+    if properties_element:
+        for prop in properties_element.iterfind(PROPERTY_TAG):
+            if prop.get("name") == "xfail":
+                xfail_case = "xfail_"
+                break
+
     # NOTE: "error" is unique in that it can occur alongside a succesful, failed, or skipped test result.
     # Because of this, we track errors separately so that the error can be correlated with the stage it
     # occurred.
+    # Based on test results from the past 300 days, errors only occur alongside skipped results.
     #
     # If there is *only* an error tag we note that as well, as this indicates that the framework
     # errored out during setup or teardown.
     if failure is not None:
-        result["result"] = "failure"
+        result["result"] = "{}failure".format(xfail_case)
         summary = failure.get("message", "")
     elif skipped is not None:
-        result["result"] = "skipped"
+        result["result"] = "{}skipped".format(xfail_case)
         summary = skipped.get("message", "")
     elif error is not None:
-        result["result"] = "error"
+        result["result"] = "{}error".format(xfail_case)
         summary = error.get("message", "")
     else:
-        result["result"] = "success"
+        result["result"] = "{}success".format(xfail_case)
         summary = ""
 
     result["summary"] = summary[:min(len(summary), MAXIMUM_SUMMARY_SIZE)]
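For illustration only (not part of the patch): a minimal sketch of how the new xfail_* results roll up into the summary counters. The sample case dicts below are hypothetical, shaped like the dicts _parse_test_case produces, and the counting mirrors _extract_test_summary above.

    from collections import defaultdict

    XFAIL_RESULTS = {"xfail_success", "xfail_failure", "xfail_error", "xfail_skipped"}

    # Hypothetical parsed cases, shaped like the dicts _parse_test_case returns.
    cases = [
        {"result": "success", "error": False, "time": "1.2"},
        {"result": "xfail_failure", "error": False, "time": "0.8"},  # expected failure
        {"result": "xfail_success", "error": False, "time": "0.5"},  # unexpected pass
        {"result": "skipped", "error": True, "time": "0.1"},         # error during setup/teardown
    ]

    summary = defaultdict(int)
    for case in cases:
        summary["tests"] += 1
        summary["failures"] += case["result"] in ("failure", "error")
        summary["skipped"] += case["result"] == "skipped"
        summary["errors"] += case["error"]
        summary["time"] += float(case["time"])
        summary["xfails"] += case["result"] in XFAIL_RESULTS

    print(dict(summary))
    # {'tests': 4, 'failures': 0, 'skipped': 1, 'errors': 1, 'time': 2.6, 'xfails': 2}

Note that, exactly as in the patched counter, xfail cases are not added to failures or skipped; they only feed the new xfails field.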
diff --git a/test_reporting/kusto/flat_views.kql b/test_reporting/kusto/flat_views.kql
index 731a8493ae..4e7d6161d4 100644
--- a/test_reporting/kusto/flat_views.kql
+++ b/test_reporting/kusto/flat_views.kql
@@ -5,8 +5,8 @@
 {
     TestReportMetadata
     | join kind=innerunique TestReportSummary on ReportId
-    | project Timestamp, OSVersion, HardwareSku, TotalCasesRun, Successes=(TotalCasesRun - Failures - Skipped),
-      Failures, Errors, Skipped, TestbedName, TrackingId, TotalRuntime,
+    | project Timestamp, OSVersion, HardwareSku, TotalCasesRun, Successes=(TotalCasesRun - Failures - Skipped - Xfails),
+      Failures, Errors, Skipped, Xfails, TestbedName, TrackingId, TotalRuntime,
       AsicType, Platform, Topology, ReportId, UploadTimestamp
     | sort by Timestamp desc
 }
diff --git a/test_reporting/kusto/setup.kql b/test_reporting/kusto/setup.kql
index 90cad08585..27817f0868 100644
--- a/test_reporting/kusto/setup.kql
+++ b/test_reporting/kusto/setup.kql
@@ -55,13 +55,14 @@
 # 2. Add a JSON mapping for the table                                        #
 ###############################################################################
 .create table TestReportSummary (ReportId: string, TotalCasesRun: int, Failures: int,
-                                 Errors: int, Skipped: int, TotalRuntime: double)
+                                 Errors: int, Skipped: int, Xfails: int, TotalRuntime: double)
 
 .create table TestReportSummary ingestion json mapping 'FlatSummaryMappingV1'
     '[{"column":"ReportId","Properties":{"path":"$.id"}},
     {"column":"TotalCasesRun","Properties":{"path":"$.tests"}},
     {"column":"Failures","Properties":{"path":"$.failures"}},
     {"column":"Errors","Properties":{"path":"$.errors"}},
     {"column":"Skipped","Properties":{"path":"$.skipped"}},
+    {"column":"Xfails","Properties":{"path":"$.xfails"}},
     {"column":"TotalRuntime","Properties":{"path":"$.time"}}]'
 
 ###############################################################################
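For illustration only (not part of the patch): a hypothetical summary document as FlatSummaryMappingV1 above would ingest it. Only the field names (the JSON paths) come from the mapping; the values are made up, and they are strings because _extract_test_summary stringifies every counter.

    import json

    # Hypothetical payload; each key matches a "path" in FlatSummaryMappingV1.
    summary_payload = {
        "id": "example-report-id",  # -> ReportId
        "tests": "100",             # -> TotalCasesRun
        "failures": "3",            # -> Failures
        "errors": "1",              # -> Errors
        "skipped": "5",             # -> Skipped
        "xfails": "4",              # -> Xfails (new column in this patch)
        "time": "1234.5",           # -> TotalRuntime
    }
    print(json.dumps(summary_payload, indent=2))

    # The flat view above would then derive:
    #   Successes = TotalCasesRun - Failures - Skipped - Xfails = 100 - 3 - 5 - 4 = 88

This matches the motivation above: an xfail pass is no longer reported as a plain success in the flat view.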
diff --git a/tests/common/plugins/conditional_mark/__init__.py b/tests/common/plugins/conditional_mark/__init__.py
index b3bc25c556..4bacd32843 100644
--- a/tests/common/plugins/conditional_mark/__init__.py
+++ b/tests/common/plugins/conditional_mark/__init__.py
@@ -299,6 +299,8 @@ def pytest_collection_modifyitems(session, config, items):
                 if mark_name == 'xfail':
                     strict = mark_details.get('strict', False)
                     mark = getattr(pytest.mark, mark_name)(reason=reason, strict=strict)
+                    # Record an 'xfail' property so it is emitted into the report XML file
+                    item.user_properties.append(('xfail', strict))
                 else:
                     mark = getattr(pytest.mark, mark_name)(reason=reason)
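For illustration only (not part of the patch): a self-contained sketch of the end-to-end effect. The ('xfail', strict) entry appended to item.user_properties above is what shows up as a <property> under the test case's <properties> element in the report XML, and the parser change in junit_xml_parser.py then prefixes the result. The test name and XML fragment below are made up, and the classify helper only mirrors the logic added to _parse_test_case; it is not the parser itself.

    import xml.etree.ElementTree as ET

    # Made-up <testcase> fragment, roughly what the report XML contains once the
    # conditional_mark plugin records the ('xfail', strict) user property.
    testcase_xml = """
    <testcase classname="tests.test_example" name="test_expected_to_fail" time="0.01">
      <properties>
        <property name="xfail" value="False"/>
      </properties>
      <failure message="assert 1 == 2">traceback...</failure>
    </testcase>
    """

    PROPERTIES_TAG = "properties"
    PROPERTY_TAG = "property"

    def classify(test_case):
        """Mirror the xfail_ prefixing added to _parse_test_case."""
        xfail_case = ""
        properties_element = test_case.find(PROPERTIES_TAG)
        if properties_element is not None:
            for prop in properties_element.iterfind(PROPERTY_TAG):
                if prop.get("name") == "xfail":
                    xfail_case = "xfail_"
                    break
        if test_case.find("failure") is not None:
            return "{}failure".format(xfail_case)
        if test_case.find("skipped") is not None:
            return "{}skipped".format(xfail_case)
        if test_case.find("error") is not None:
            return "{}error".format(xfail_case)
        return "{}success".format(xfail_case)

    print(classify(ET.fromstring(testcase_xml)))  # xfail_failure

Dropping the <properties> block from the fragment makes the same case classify as a plain "failure", which is the distinction the new xfails counter in the summary relies on.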