Formatted output of non-empty TestRunner error message dictionary entries
sorted by message precedence: "critical" > "error" > "warning" > "skipped" > "info"
RichardBruskiewich committed May 17, 2024
1 parent ac4ec00 commit 2fc53d8
Showing 2 changed files with 77 additions and 66 deletions.
62 changes: 10 additions & 52 deletions graph_validation_tests/__init__.py
@@ -279,6 +279,7 @@ async def run_test_cases(test_cases: List[TestCaseRun]):
# TODO: unsure if one needs to limit concurrent requests here...
await gather([test_case.run_test_case() for test_case in test_cases]) # , limit=num_concurrent_requests)

MESSAGE_PRECEDENCE = ("critical", "error", "warning", "skipped", "info")
FAILURE_MODES = ("error", "critical")

def compute_status(self, tcr: TestCaseRun) -> Tuple[str, TestCaseResultEnum, Dict]:
@@ -300,12 +301,16 @@ def compute_status(self, tcr: TestCaseRun) -> Tuple[str, TestCaseResultEnum, Dict]:
message_catalog: MESSAGE_CATALOG = messages_by_test[test]
mtype: str
messages: Dict
non_empty_messages: MESSAGE_CATALOG = {
mtype: messages for mtype, messages in message_catalog.items() if message_catalog[mtype]
}
# Load non-empty messages in order of message precedence
non_empty_messages: MESSAGE_CATALOG = dict()
for mtype in self.MESSAGE_PRECEDENCE:
if mtype in message_catalog and message_catalog[mtype]:
non_empty_messages[mtype] = message_catalog[mtype]
# TODO: this first iteration in which FAILURE_MODES are
# immutable (not sensitive to TestRunner parameters)
if not non_empty_messages or not any([mtype in non_empty_messages for mtype in self.FAILURE_MODES]):
# immutable (not sensitive to TestRunner parameters);
# Maybe we need to treat "SKIPPED" tests differently here(?)
if not non_empty_messages or \
not any([mtype in non_empty_messages for mtype in self.FAILURE_MODES]):
return target, TestCaseResultEnum.PASSED, non_empty_messages
else:
return target, TestCaseResultEnum.FAILED, non_empty_messages
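
Aside (not part of the diff): a minimal runnable sketch of the precedence-ordered
filtering this hunk introduces. MESSAGE_PRECEDENCE and FAILURE_MODES mirror the
class constants above; the sample catalog is hypothetical.

    from typing import Dict

    MESSAGE_PRECEDENCE = ("critical", "error", "warning", "skipped", "info")
    FAILURE_MODES = ("error", "critical")

    def filter_by_precedence(message_catalog: Dict[str, Dict]) -> Dict[str, Dict]:
        # Python dicts preserve insertion order, so iterating over
        # MESSAGE_PRECEDENCE puts "critical" entries first and "info" last.
        return {
            mtype: message_catalog[mtype]
            for mtype in MESSAGE_PRECEDENCE
            if message_catalog.get(mtype)
        }

    # Hypothetical catalog: the empty "warning" entry is dropped, order normalized.
    catalog = {
        "info": {"info.compliant": None},
        "warning": {},
        "error": {"error.trapi.response.empty": None},
    }
    non_empty = filter_by_precedence(catalog)
    assert list(non_empty) == ["error", "info"]
    # Any FAILURE_MODES key present means the test case is reported as FAILED.
    assert any(mtype in non_empty for mtype in FAILURE_MODES)
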
@@ -541,53 +546,6 @@ async def run_tests(
runner_settings=runner_settings
) for target in components
]
#
# TODO: the following comment is plagiarized from 3rd party TestRunner comments simply as
# a short term source of inspiration for the design of results from this TestRunner
# The ARS_test_Runner with the following command:
#
# ARS_Test_Runner
# --env 'ci'
# --query_type 'treats_creative'
# --expected_output '["TopAnswer","TopAnswer"]'
# --input_curie 'MONDO:0005301'
# --output_curie '["PUBCHEM.COMPOUND:107970","UNII:3JB47N2Q2P"]'
#
# gives a Python dictionary report (serialized to JSON) similar as follows:
#
# {
# "pks": {
# "parent_pk": "e29c5051-d8d7-4e82-a1a1-b3cc9b8c9657",
# "merged_pk": "56e3d5ac-66b4-4560-9f56-7a4d117e8003",
# "aragorn": "14953570-7451-4d1b-a817-fc9e7879b477",
# "arax": "8c88ead6-6cbf-4c9a-9570-ca76392ddb7a",
# "unsecret": "bd084e27-2a0e-4df4-843c-417bfac6f8c7",
# "bte": "d28a4146-9486-4e98-973d-8cdd33270595",
# "improving": "d8d3c905-ec07-491f-a078-7ef0f489a409"
# },
# "results": [
# {
# "PUBCHEM.COMPOUND:107970": {
# "aragorn": "Fail",
# "arax": "Pass",
# "unsecret": "Fail",
# "bte": "Pass",
# "improving": "Pass",
# "ars": "Pass"
# }
# },
# {
# "UNII:3JB47N2Q2P": {
# "aragorn": "Fail",
# "arax": "Pass",
# "unsecret": "Fail",
# "bte": "Pass",
# "improving": "Pass",
# "ars": "Pass"
# }
# }
# ]
# }
results = {
"pks": dict(),
"results": dict()
81 changes: 67 additions & 14 deletions tests/graph_validation_test_runner/test_graph_validation_test.py
@@ -4,7 +4,7 @@
from typing import List, Dict
from translator_testing_model.datamodel.pydanticmodel import TestAsset
from graph_validation_tests import TestCaseRun, GraphValidationTest
from graph_validation_tests.utils.unit_test_templates import by_subject, by_object
from graph_validation_tests.utils.unit_test_templates import by_subject, by_object, raise_object_entity
from tests import DEFAULT_TRAPI_VERSION, DEFAULT_BMT

import logging
@@ -103,31 +103,84 @@ def test_test_case_run_report_messages():


def test_format_results():
# create dummy test and test case runs
# with artificially generated validation messages
test_asset_id: str = "TestAsset_1"
test_asset: TestAsset = TestAsset(id=test_asset_id)
gvt: GraphValidationTest = GraphValidationTest(
gvt_0: GraphValidationTest = GraphValidationTest(
test_asset=test_asset
)
tcr_0: TestCaseRun = TestCaseRun(
test_run=gvt_0,
test=by_subject
)
test_cases_0: List[TestCaseRun] = [tcr_0]
formatted_output_0: Dict = gvt_0.format_results(test_cases_0)
assert formatted_output_0
by_subject_test_case_id: str = f"{test_asset_id}-by_subject"
assert formatted_output_0[by_subject_test_case_id]
assert "ars" in formatted_output_0[by_subject_test_case_id]
assert formatted_output_0[by_subject_test_case_id]["ars"]
assert "status" in formatted_output_0[by_subject_test_case_id]["ars"]
assert formatted_output_0[by_subject_test_case_id]["ars"]["status"] == "SKIPPED"
assert not formatted_output_0[by_subject_test_case_id]["ars"]["messages"]

gvt_1: GraphValidationTest = GraphValidationTest(
test_asset=test_asset
)
# create some dummy test runs with artificially generated validation messages
tcr_1: TestCaseRun = TestCaseRun(
test_run=gvt,
test_run=gvt_1,
test=by_subject
)
tcr_1.report("critical.trapi.response.unexpected_http_code", identifier="500")
tcr_2: TestCaseRun = TestCaseRun(
test_run=gvt,
test_run=gvt_1,
test=by_object
)
tcr_2.report(code="error.trapi.response.empty")
tcr_3: TestCaseRun = TestCaseRun(
test_run=gvt_1,
test=raise_object_entity
)
tcr_3.report(
code="info.knowledge_graph.edge.predicate.mixin",
source_trail="my_ara->my_kp->my_ks",
identifier="biolink:treats",
edge_id="a--(treats)->b"
)
tcr_3.report(code="warning.trapi.response.schema_version.missing")
tcr_3.report(
code="error.trapi.response.message.knowledge_graph.node.missing",
identifier="foo:missing",
context="subject"
)
tcr_3.report(
code="skipped.test",
identifier="raise_object_entity",
context="object 'foo:missing[biolink:NamedThing]'",
reason=" and is a mixin since it is either not an ontology term " +
"or does not map onto a parent ontology term."
)
tcr_3.report(
code="warning.trapi.response.status.unknown",
identifier="fake-trapi-response-status"
)
test_cases: List[TestCaseRun] = [
tcr_1,
tcr_2
tcr_2,
tcr_3
]
formatted_output: Dict = gvt.format_results(test_cases)
assert formatted_output
formatted_output_1: Dict = gvt_1.format_results(test_cases)
assert formatted_output_1
by_subject_test_case_id: str = f"{test_asset_id}-by_subject"
assert formatted_output[by_subject_test_case_id]
assert formatted_output_1[by_subject_test_case_id]
by_object_test_case_id: str = f"{test_asset_id}-by_object"
assert formatted_output[by_object_test_case_id]
assert "ars" in formatted_output[by_object_test_case_id]
assert formatted_output[by_object_test_case_id]["ars"]
assert "status" in formatted_output[by_object_test_case_id]["ars"]
assert formatted_output[by_object_test_case_id]["ars"]["status"] == "SKIPPED"
assert not formatted_output[by_object_test_case_id]["ars"]["messages"]
assert formatted_output_1[by_object_test_case_id]
raise_object_entity_test_case_id: str = f"{test_asset_id}-raise_object_entity"
assert formatted_output_1[raise_object_entity_test_case_id]
assert "ars" in formatted_output_1[by_object_test_case_id]
assert formatted_output_1[by_object_test_case_id]["ars"]
assert "status" in formatted_output_1[by_object_test_case_id]["ars"]
assert formatted_output_1[by_object_test_case_id]["ars"]["status"] == "FAILED"
assert formatted_output_1[by_object_test_case_id]["ars"]["messages"]
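
Taken together, these assertions imply a formatted-results shape along the
following lines (an editor's sketch inferred from the assertions, not verbatim
TestRunner output):

    # Illustrative only: structure inferred from the assertions above; real
    # message payloads are elided and replaced with None placeholders.
    expected_shape = {
        "TestAsset_1-by_subject": {
            "ars": {
                "status": "FAILED",  # tcr_1 reported a "critical.*" code
                "messages": {
                    "critical": {"critical.trapi.response.unexpected_http_code": None}
                },
            }
        },
        "TestAsset_1-by_object": {
            "ars": {
                "status": "FAILED",  # tcr_2 reported "error.trapi.response.empty"
                "messages": {"error": {"error.trapi.response.empty": None}},
            }
        },
        # "TestAsset_1-raise_object_entity" would carry tcr_3's mixed-severity
        # messages in MESSAGE_PRECEDENCE order: error, warning, skipped, info.
    }
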
