diff --git a/scripts/py_matter_yamltests/matter_yamltests/hooks.py b/scripts/py_matter_yamltests/matter_yamltests/hooks.py
index f471af83408589..7e779420759e6c 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/hooks.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/hooks.py
@@ -98,12 +98,15 @@ def stop(self, duration: int):
         """
         pass
 
-    def test_start(self, name: str, count: int):
+    def test_start(self, filename: str, name: str, count: int):
         """
         This method is called when the runner starts running a single test.
 
         Parameters
         ----------
+        filename: str
+            The name of the file containing the test that is starting.
+
         name: str
             The name of the test that is starting.
 
@@ -126,7 +129,7 @@ def test_stop(self, exception: Exception, duration: int):
         """
         pass
 
-    def step_skipped(self, name: str):
+    def step_skipped(self, name: str, expression: str):
         """
         This method is called when running a step is skipped.
 
@@ -134,6 +137,9 @@ def step_skipped(self, name: str):
         ----------
         name: str
             The name of the test step that is skipped.
+
+        expression: str
+            The PICS expression that results in the test step being skipped.
         """
         pass
 
@@ -148,7 +154,7 @@ def step_start(self, name: str):
         """
         pass
 
-    def step_success(self, logger, logs, duration: int):
+    def step_success(self, logger, logs, duration: int, request):
         """
         This method is called when running a step succeeds.
 
@@ -162,10 +168,13 @@ def step_success(self, logger, logs, duration: int):
 
         duration: int
             How long it took to run the test step, in milliseconds.
+
+        request:
+            The original request as defined by the test step.
         """
         pass
 
-    def step_failure(self, logger, logs, duration: int, expected, received):
+    def step_failure(self, logger, logs, duration: int, request, received):
        """
         This method is called when running a step fails.
 
@@ -180,8 +189,8 @@ def step_failure(self, logger, logs, duration: int, expected, received):
         duration: int
             How long it took to run the test step, in milliseconds.
 
-        expected:
-            The expected response as defined by the test step.
+        request:
+            The original request as defined by the test step.
 
         received:
             The received response.
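Note for downstream consumers: these signature changes are breaking for any custom `TestRunnerHooks` subclass. A minimal sketch of an updated implementation, assuming the usual `matter_yamltests.hooks` import path (the class name and print formatting here are illustrative, not part of this patch):

    from matter_yamltests.hooks import TestRunnerHooks

    class PrintingHooks(TestRunnerHooks):  # hypothetical subclass
        def test_start(self, filename: str, name: str, count: int):
            # `filename` is new: the test file's basename with the extension stripped.
            print(f'Running {count} steps from {filename}: {name}')

        def step_skipped(self, name: str, expression: str):
            # `expression` is new: the PICS expression that evaluated to false.
            print(f'Skipped "{name}" because {expression} == false')

        def step_success(self, logger, logs, duration: int, request):
            # `request` is new: the parsed test step that was executed.
            print(f'"{request.label}" passed in {duration}ms')

        def step_failure(self, logger, logs, duration: int, request, received):
            # The expected responses formerly passed as `expected` are still
            # reachable through request.responses.
            print(f'"{request.label}" failed: expected {request.responses}, received {received}')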
diff --git a/scripts/py_matter_yamltests/matter_yamltests/parser.py b/scripts/py_matter_yamltests/matter_yamltests/parser.py
index 8f9b38a85f63f8..1affdf21fd6564 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/parser.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/parser.py
@@ -180,6 +180,7 @@ def __init__(self, test: dict, config: dict, definitions: SpecDefinitions, pics_
         self.attribute = _value_or_none(test, 'attribute')
         self.event = _value_or_none(test, 'event')
         self.endpoint = _value_or_config(test, 'endpoint', config)
+        self.pics = _value_or_none(test, 'PICS')
         self.is_pics_enabled = pics_checker.check(_value_or_none(test, 'PICS'))
         self.identity = _value_or_none(test, 'identity')
 
@@ -565,6 +566,10 @@ def wait_for(self):
     def event_number(self):
         return self._test.event_number
 
+    @property
+    def pics(self):
+        return self._test.pics
+
     def post_process_response(self, received_responses):
         result = PostProcessResponseResult()
 
@@ -955,11 +960,12 @@ class TestParserConfig:
 class TestParser:
     def __init__(self, test_file: str, parser_config: TestParserConfig = TestParserConfig()):
         yaml_loader = YamlLoader()
-        name, pics, config, tests = yaml_loader.load(test_file)
+        filename, name, pics, config, tests = yaml_loader.load(test_file)
 
         self.__apply_config_override(config, parser_config.config_override)
         self.__apply_legacy_config(config)
 
+        self.filename = filename
         self.name = name
         self.PICS = pics
         self.tests = YamlTests(
diff --git a/scripts/py_matter_yamltests/matter_yamltests/runner.py b/scripts/py_matter_yamltests/matter_yamltests/runner.py
index 4c5db7a9d2b8d8..884d2c249467df 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/runner.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/runner.py
@@ -157,12 +157,12 @@ async def _run(self, parser: TestParser, config: TestRunnerConfig):
             await self.start()
 
         hooks = config.hooks
-        hooks.test_start(parser.name, parser.tests.count)
+        hooks.test_start(parser.filename, parser.name, parser.tests.count)
 
         test_duration = 0
         for idx, request in enumerate(parser.tests):
             if not request.is_pics_enabled:
-                hooks.step_skipped(request.label)
+                hooks.step_skipped(request.label, request.pics)
                 continue
             elif not config.adapter:
                 hooks.step_start(request.label)
@@ -185,9 +185,9 @@ async def _run(self, parser: TestParser, config: TestRunnerConfig):
 
             if logger.is_failure():
                 hooks.step_failure(logger, logs, duration,
-                                   request.responses, responses)
+                                   request, responses)
             else:
-                hooks.step_success(logger, logs, duration)
+                hooks.step_success(logger, logs, duration, request)
 
             if logger.is_failure() and config.options.stop_on_error:
                 status = False
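End to end, the runner now threads the file name and the raw PICS expression from the parser into the hooks. A sketch of what that exposes, assuming a simple test file for which the default `TestParserConfig` suffices (the YAML file name below is hypothetical):

    from matter_yamltests.parser import TestParser

    parser = TestParser('Test_TC_XYZ_1_1.yaml')  # hypothetical test file
    print(parser.filename)  # 'Test_TC_XYZ_1_1', the basename with extension stripped

    for request in parser.tests:
        if not request.is_pics_enabled:
            # request.pics is the raw 'PICS' value from the step, e.g. 'OO.S'.
            print(f'would skip "{request.label}" ({request.pics} == false)')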
diff --git a/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py b/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
index 543de252dc3820..2332419c0d10ee 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
@@ -25,6 +25,8 @@
 except:
     from yaml import SafeLoader
 
+import os
+
 import yaml
 
 
@@ -32,12 +34,14 @@ class YamlLoader:
     """This class loads a file from the disk and validates that the content is a well formed yaml test."""
 
     def load(self, yaml_file: str) -> tuple[str, Union[list, str], dict, list]:
+        filename = ''
         name = ''
         pics = None
         config = {}
         tests = []
 
         if yaml_file:
+            filename = os.path.splitext(os.path.basename(yaml_file))[0]
             with open(yaml_file) as f:
                 loader = SafeLoader
                 add_yaml_support_for_scientific_notation_without_dot(loader)
@@ -50,7 +54,7 @@ def load(self, yaml_file: str) -> tuple[str, Union[list, str], dict, list]:
             config = content.get('config', {})
             tests = content.get('tests', [])
 
-        return (name, pics, config, tests)
+        return (filename, name, pics, config, tests)
 
     def __check_content(self, content):
         schema = {
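The new first tuple element is derived purely from the path, so callers holding only a path can compute it the same way; for example (the path is illustrative):

    import os

    yaml_file = '/tmp/Test_TC_XYZ_1_1.yaml'  # illustrative path
    filename = os.path.splitext(os.path.basename(yaml_file))[0]
    assert filename == 'Test_TC_XYZ_1_1'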
diff --git a/scripts/py_matter_yamltests/test_yaml_loader.py b/scripts/py_matter_yamltests/test_yaml_loader.py
index d67dd062667ba1..1e748e317a3082 100644
--- a/scripts/py_matter_yamltests/test_yaml_loader.py
+++ b/scripts/py_matter_yamltests/test_yaml_loader.py
@@ -66,7 +66,8 @@ def test_missing_file(self):
 
         content = None
 
-        name, pics, config, tests = load(content)
+        filename, name, pics, config, tests = load(content)
+        self.assertEqual(filename, '')
         self.assertEqual(name, '')
         self.assertEqual(pics, None)
         self.assertEqual(config, {})
@@ -77,7 +78,8 @@ def test_empty_file(self):
 
         content = ''
 
-        name, pics, config, tests = load(content)
+        filename, name, pics, config, tests = load(content)
+        self.assertEqual(filename, '')
         self.assertEqual(name, '')
         self.assertEqual(pics, None)
         self.assertEqual(config, {})
@@ -99,7 +101,7 @@ def test_key_name(self):
             name: Test Name
             '''
 
-        name, _, _, _ = load(content)
+        _, name, _, _, _ = load(content)
         self.assertEqual(name, 'Test Name')
 
     def test_key_name_wrong_values(self):
@@ -117,7 +119,7 @@ def test_key_pics_string(self):
             PICS: OO.S
             '''
 
-        _, pics, _, _ = load(content)
+        _, _, pics, _, _ = load(content)
         self.assertEqual(pics, 'OO.S')
 
     def test_key_pics_list(self):
@@ -129,7 +131,7 @@ def test_key_pics_list(self):
               - OO.C
             '''
 
-        _, pics, _, _ = load(content)
+        _, _, pics, _, _ = load(content)
         self.assertEqual(pics, ['OO.S', 'OO.C'])
 
     def test_key_pics_wrong_values(self):
@@ -149,7 +151,7 @@ def test_key_config(self):
               name2: value2
             '''
 
-        _, _, config, _ = load(content)
+        _, _, _, config, _ = load(content)
         self.assertEqual(config, {'name': 'value', 'name2': 'value2'})
 
     def test_key_config_wrong_values(self):
@@ -169,7 +171,7 @@ def test_key_tests(self):
               - label: Test2
             '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [{'label': 'Test1'}, {'label': 'Test2'}])
 
     def test_key_tests_wrong_values(self):
@@ -202,7 +204,7 @@ def test_key_tests_step_bool_keys(self):
 
         wrong_values = self._get_wrong_values([bool], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=True))
+            _, _, _, _, tests = load(content.format(key=key, value=True))
             self.assertEqual(tests, [{key: True}])
 
             for value in wrong_values:
@@ -232,7 +234,7 @@ def test_key_tests_step_str_keys(self):
 
         wrong_values = self._get_wrong_values([str], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value='a string'))
+            _, _, _, _, tests = load(content.format(key=key, value='a string'))
             self.assertEqual(tests, [{key: 'a string'}])
 
             for value in wrong_values:
@@ -256,7 +258,7 @@ def test_key_tests_step_int_keys(self):
 
         wrong_values = self._get_wrong_values([int], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=1))
+            _, _, _, _, tests = load(content.format(key=key, value=1))
             self.assertEqual(tests, [{key: 1}])
 
             for value in wrong_values:
@@ -276,7 +278,8 @@ def test_key_tests_step_dict_keys(self):
                        '      value: True\n')
 
         wrong_values = self._get_wrong_values([dict], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=valid_value))
+            _, _, _, _, tests = load(
+                content.format(key=key, value=valid_value))
             self.assertEqual(tests, [{key: {'value': True}}])
 
             for value in wrong_values:
@@ -291,12 +294,12 @@ def test_key_tests_step_response_key(self):
 
         value = ('\n'
                  '      value: True\n')
-        _, _, _, tests = load(content.format(value=value))
+        _, _, _, _, tests = load(content.format(value=value))
         self.assertEqual(tests, [{'response': {'value': True}}])
 
         value = ('\n'
                  '      - value: True\n')
-        _, _, _, tests = load(content.format(value=value))
+        _, _, _, _, tests = load(content.format(value=value))
         self.assertEqual(tests, [{'response': [{'value': True}]}])
 
         wrong_values = self._get_wrong_values([dict, list], spaces=6)
@@ -310,10 +313,10 @@ def test_key_tests_step_event_number_key(self):
         content = ('tests:\n'
                    '  - eventNumber: {value}')
 
-        _, _, _, tests = load(content.format(value=1))
+        _, _, _, _, tests = load(content.format(value=1))
         self.assertEqual(tests, [{'eventNumber': 1}])
 
-        _, _, _, tests = load(content.format(value='TestKey'))
+        _, _, _, _, tests = load(content.format(value='TestKey'))
         self.assertEqual(tests, [{'eventNumber': 'TestKey'}])
 
         wrong_values = self._get_wrong_values([str, int], spaces=6)
@@ -328,7 +331,7 @@ def test_key_tests_step_verification_key(self):
                    '  - verification: {value}\n'
                    '    disabled: true')
 
-        _, _, _, tests = load(content.format(value='Test Sentence'))
+        _, _, _, _, tests = load(content.format(value='Test Sentence'))
         self.assertEqual(
             tests, [{'verification': 'Test Sentence', 'disabled': True}])
 
@@ -392,7 +395,7 @@ def test_key_tests_step_rule_step_with_verification_should_be_disabled_or_intera
               disabled: true
             '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [
             {'label': 'A Test Name', 'verification': 'A verification sentence', 'disabled': True}])
 
@@ -412,7 +415,7 @@ def test_key_tests_step_rule_step_with_verification_should_be_disabled_or_intera
               command: UserPrompt
             '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [
             {'label': 'A Test Name', 'verification': 'A verification sentence', 'command': 'UserPrompt'}])
 
@@ -427,7 +430,7 @@ def test_key_tests_step_response_key_values_key(self):
                    '  - response:\n'
                    '      values: {value}')
 
-        _, _, _, tests = load(content.format(value=[]))
+        _, _, _, _, tests = load(content.format(value=[]))
         self.assertEqual(tests, [{'response': {'values': []}}])
 
         wrong_values = self._get_wrong_values([list], spaces=8)
@@ -442,7 +445,7 @@ def test_key_tests_step_response_key_error_key(self):
                    '  - response:\n'
                    '      error: {value}')
 
-        _, _, _, tests = load(content.format(value='AnError'))
+        _, _, _, _, tests = load(content.format(value='AnError'))
         self.assertEqual(tests, [{'response': {'error': 'AnError'}}])
 
         wrong_values = self._get_wrong_values([str], spaces=8)
@@ -457,7 +460,7 @@ def test_key_tests_step_response_key_cluster_error_key(self):
                    '  - response:\n'
                    '      clusterError: {value}')
 
-        _, _, _, tests = load(content.format(value=1))
+        _, _, _, _, tests = load(content.format(value=1))
         self.assertEqual(tests, [{'response': {'clusterError': 1}}])
 
         wrong_values = self._get_wrong_values([int], spaces=8)
@@ -472,7 +475,7 @@ def test_key_tests_step_response_key_constraints_key(self):
                    '  - response:\n'
                    '      constraints: {value}')
 
-        _, _, _, tests = load(content.format(value={}))
+        _, _, _, _, tests = load(content.format(value={}))
         self.assertEqual(tests, [{'response': {'constraints': {}}}])
 
         wrong_values = self._get_wrong_values([dict], spaces=8)
@@ -487,7 +490,7 @@ def test_key_tests_step_response_key_save_as_key(self):
                    '  - response:\n'
                    '      saveAs: {value}')
 
-        _, _, _, tests = load(content.format(value='AKey'))
+        _, _, _, _, tests = load(content.format(value='AKey'))
         self.assertEqual(tests, [{'response': {'saveAs': 'AKey'}}])
 
         wrong_values = self._get_wrong_values([str], spaces=8)
diff --git a/scripts/tests/yaml/chiptool.py b/scripts/tests/yaml/chiptool.py
index c5d6724b1459cd..d47a4d22d32882 100755
--- a/scripts/tests/yaml/chiptool.py
+++ b/scripts/tests/yaml/chiptool.py
@@ -85,6 +85,7 @@ def chiptool_runner_options(f):
 
 
 CONTEXT_SETTINGS['ignore_unknown_options'] = True
+CONTEXT_SETTINGS['default_map']['chiptool']['use_test_harness_log_format'] = True
 
 
 @click.command(context_settings=CONTEXT_SETTINGS)
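The `default_map` entry turns the new flag on by default for the chiptool wrapper without touching the option declaration, since click consults `default_map` before falling back to an option's own `default`. A toy illustration of that mechanism, flat rather than nested under a subcommand name as in the patch (all names are illustrative):

    import click

    @click.command(context_settings={'default_map': {'verbose': True}})
    @click.option('--verbose', type=bool, default=False)
    def main(verbose):
        # Prints True: the default_map entry overrides the declared default.
        click.echo(verbose)

    if __name__ == '__main__':
        main()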
diff --git a/scripts/tests/yaml/runner.py b/scripts/tests/yaml/runner.py
index 9889976cf9e002..7ccc6cf50253ea 100755
--- a/scripts/tests/yaml/runner.py
+++ b/scripts/tests/yaml/runner.py
@@ -71,6 +71,8 @@ def test_runner_options(f):
                      help='Show additional logs provided by the adapter.')(f)
     f = click.option('--show_adapter_logs_on_error', type=bool, default=True, show_default=True,
                      help='Show additional logs provided by the adapter on error.')(f)
+    f = click.option('--use_test_harness_log_format', type=bool, default=False, show_default=True,
+                     help='Use the test harness log format.')(f)
     return f
 
 
@@ -261,11 +263,11 @@ def dry_run(parser_group: ParserGroup):
 @runner_base.command()
 @test_runner_options
 @pass_parser_group
-def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool):
+def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool):
     """Run the test suite."""
     adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     runner = TestRunner()
@@ -276,11 +278,11 @@ def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_wa
 @test_runner_options
 @websocket_runner_options
 @pass_parser_group
-def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, server_address: str, server_port: int, server_path: str, server_name: str, server_arguments: str):
+def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool, server_address: str, server_port: int, server_path: str, server_name: str, server_arguments: str):
     """Run the test suite using websockets."""
     adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     if server_path is None and server_name:
@@ -299,11 +301,11 @@ def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop
 @test_runner_options
 @chip_repl_runner_options
 @pass_parser_group
-def chip_repl(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, runner: str, repl_storage_path: str, commission_on_network_dut: bool):
+def chip_repl(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool, runner: str, repl_storage_path: str, commission_on_network_dut: bool):
     """Run the test suite using chip-repl."""
     adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     runner = __import__(runner, fromlist=[None]).Runner(repl_storage_path, commission_on_network_dut)
diff --git a/scripts/tests/yaml/tests_logger.py b/scripts/tests/yaml/tests_logger.py
index 4a0bd553a03047..bdfed70030fd83 100755
--- a/scripts/tests/yaml/tests_logger.py
+++ b/scripts/tests/yaml/tests_logger.py
@@ -128,11 +128,23 @@ class RunnerStrings:
     error_header = click.style('\t\t Error at step {index}:', fg='white', bold=True)
     error_line = click.style('\t\t {error_line}', fg='white')
 
+    test_harness_test_start = '\t\t***** Test Start : {filename}'
+    test_harness_test_stop_success = '\t\t***** Test Complete: {filename}'
+    test_harness_step_skipped = '\t\t**** Skipping: {expression} == false'
+    test_harness_step_start = '\t\t***** Test Step {index} : {name}'
+    test_harness_step_failure = '\t\t***** Test Failure : {message}'
+    test_harness_setup_device_connection_success = '\t\t**** Test Setup: Device Connected'
+    test_harness_setup_device_connection_failure = '\t\t**** Test Setup: Device Connection Failure [deviceId={deviceId}. Error {message}]'
+    log = '\t\t{message}'
+    user_prompt = '\t\tUSER_PROMPT: {message}'
+
 
 class TestRunnerLogger(TestRunnerHooks):
-    def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error: bool = True):
+    def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error: bool = True, use_test_harness_log_format: bool = False):
         self.__show_adapter_logs = show_adapter_logs
         self.__show_adapter_logs_on_error = show_adapter_logs_on_error
+        self.__use_test_harness_log_format = use_test_harness_log_format
+        self.__filename = None
         self.__index = 1
         self.__successes = 0
         self.__warnings = 0
@@ -144,14 +156,17 @@ def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error:
 
     def start(self, count: int):
         print(self.__strings.start)
-        pass
 
     def stop(self, duration: int):
         print(self.__strings.stop.format(runned=self.__runned, skipped=self.__skipped, duration=duration))
 
-    def test_start(self, name: str, count: int):
+    def test_start(self, filename: str, name: str, count: int):
         print(self.__strings.test_start.format(name=click.style(name, bold=True), count=click.style(count, bold=True)))
 
+        if self.__use_test_harness_log_format:
+            self.__filename = filename
+            print(self.__strings.test_harness_test_start.format(filename=filename))
+
     def test_stop(self, duration: int):
         if self.__errors:
             state = _FAILURE
@@ -160,18 +175,27 @@ def test_stop(self, duration: int):
         else:
             state = _SUCCESS
 
+        if self.__use_test_harness_log_format and (state == _SUCCESS or state == _WARNING):
+            print(self.__strings.test_harness_test_stop_success.format(filename=self.__filename))
+
         successes = click.style(self.__successes, bold=True)
         errors = click.style(self.__errors, bold=True)
         warnings = click.style(self.__warnings, bold=True)
         print(self.__strings.test_stop.format(state=state, successes=successes,
                                               errors=errors, warnings=warnings, duration=duration))
 
-    def step_skipped(self, name: str):
+    def step_skipped(self, name: str, expression: str):
         print(self.__strings.step_skipped.format(index=self.__index, name=_strikethrough(name)))
         self.__index += 1
         self.__skipped += 1
 
+        if self.__use_test_harness_log_format:
+            print(self.__strings.test_harness_step_skipped.format(expression=expression))
+
     def step_start(self, name: str):
+        if self.__use_test_harness_log_format:
+            print(self.__strings.test_harness_step_start.format(index=self.__index, name=name))
+
         print(self.__strings.step_start.format(index=self.__index, name=click.style(name, bold=True)), end='')
         # flushing stdout such that the previous print statement is visible on the screen for long running tasks.
         sys.stdout.flush()
@@ -183,10 +207,19 @@ def step_unknown(self):
 
         self.__runned += 1
 
-    def step_success(self, logger, logs, duration: int):
+    def step_success(self, logger, logs, duration: int, request):
         print(self.__strings.step_result.format(state=_SUCCESS, duration=duration))
         self.__print_results(logger)
 
+        if self.__use_test_harness_log_format:
+            if request.command == 'WaitForCommissionee':
+                print(self.__strings.test_harness_setup_device_connection_success)
+            elif request.command == 'Log':
+                message = request.arguments['values'][0]['value']
+                print(self.__strings.log.format(message=f'{message}'))
+            elif request.command == 'UserPrompt':
+                message = request.arguments['values'][0]['value']
+                print(self.__strings.user_prompt.format(message=f'{message}'))
         if self.__show_adapter_logs:
             self.__log_printer.print(logs)
 
@@ -196,7 +229,7 @@ def step_success(self, logger, logs, duration: int):
         self.__errors += logger.errors
         self.__runned += 1
 
-    def step_failure(self, logger, logs, duration: int, expected, received):
+    def step_failure(self, logger, logs, duration: int, request, received):
         print(self.__strings.step_result.format(state=_FAILURE, duration=duration))
         self.__print_results(logger)
 
@@ -217,13 +250,24 @@ def step_failure(self, logger, logs, duration: int, expected, received):
                 has_failures_without_exception = True
 
         if has_failures_without_exception:
-            self.__print_failure(expected, received)
+            self.__print_failure(request.responses, received)
 
         self.__successes += logger.successes
         self.__warnings += logger.warnings
         self.__errors += logger.errors
         self.__runned += 1
 
+        if self.__use_test_harness_log_format:
+            message = ''
+            for entry in logger.entries:
+                if entry.is_error():
+                    message = entry.message
+                    print(self.__strings.test_harness_step_failure.format(message=message))
+                    break
+
+            if request.command == 'WaitForCommissionee':
+                print(self.__strings.test_harness_setup_device_connection_failure.format(deviceId=request.node_id, message=message))
+
     def __print_step_exception(self, exception: TestStepError):
         if exception.context is None:
             return
@@ -348,7 +392,7 @@ def parser():
 @simulate.command()
 def runner():
     """Simulate running tests."""
-    runner_logger = TestRunnerLogger()
+    runner_logger = TestRunnerLogger(use_test_harness_log_format=True)
 
     class TestLogger:
         def __init__(self, entries=[], successes=0, warnings=0, errors=0):
@@ -378,12 +422,12 @@ def __init__(self, message, module='CTL', level='Others'):
     ]
 
     runner_logger.start(99)
-    runner_logger.test_start('test.yaml', 23)
+    runner_logger.test_start('Test_File', 'A test with multiple steps', 23)
     runner_logger.step_start('First Step')
     runner_logger.step_success(success_logger, empty_logs, 1234)
     runner_logger.step_start('Second Step')
     runner_logger.step_failure(error_logger, other_logs, 4321, expected_response, received_response)
-    runner_logger.step_skipped('Third Step')
+    runner_logger.step_skipped('Third Step', 'SHOULD_RUN')
     runner_logger.step_start('Fourth Step')
     runner_logger.step_unknown()
     runner_logger.test_stop(1234 + 4321)
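With the flag on, the simulated run above interleaves the harness-style markers with the existing colored output. From the format strings added to `RunnerStrings`, the extra lines for the first few calls would look roughly like this (actual output is prefixed with two tab characters):

    from tests_logger import TestRunnerLogger  # assuming scripts/tests/yaml is on sys.path

    runner_logger = TestRunnerLogger(use_test_harness_log_format=True)

    runner_logger.test_start('Test_File', 'A test with multiple steps', 23)
    # ...existing "test_start" line, plus:
    #     ***** Test Start : Test_File

    runner_logger.step_skipped('Third Step', 'SHOULD_RUN')
    # ...existing strikethrough skip line, plus:
    #     **** Skipping: SHOULD_RUN == false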