From 4b5cf3f7c549412e60baa925a42c80dbf17ffdd7 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Thu, 21 Feb 2019 11:15:02 +0100
Subject: [PATCH 1/5] interpreter: add "protocol" kwarg to test

This is the first step towards adding support for TAP.
---
 docs/markdown/Reference-manual.md | 11 +++++++----
 mesonbuild/backend/backends.py    |  5 +++--
 mesonbuild/interpreter.py         | 11 ++++++++---
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/docs/markdown/Reference-manual.md b/docs/markdown/Reference-manual.md
index 6c3b3ad7a8f9..412135feff9b 100644
--- a/docs/markdown/Reference-manual.md
+++ b/docs/markdown/Reference-manual.md
@@ -1406,10 +1406,7 @@
 executable to run. The executable can be an
 [executable build target object](#build-target-object) returned by
 [`executable()`](#executable) or an
 [external program object](#external-program-object) returned by
-[`find_program()`](#find_program). The executable's exit code is used
-by the test harness to record the outcome of the test, for example
-exit code zero indicates success. For more on the Meson test harness
-protocol read [Unit Tests](Unit-tests.md).
+[`find_program()`](#find_program).
 
 Keyword arguments are the following:
@@ -1446,6 +1443,12 @@ Keyword arguments are the following:
   before test is executed even if they have
   `build_by_default : false`. Since 0.46.0
 
+- `protocol` specifies how the test results are parsed. For now
+  it must be `exitcode`, that is the executable's exit code is used
+  by the test harness to record the outcome of the test. For example
+  an exit code of zero indicates success. For more on the Meson test harness
+  protocol read [Unit Tests](Unit-tests.md). Since 0.50.0
+
 Defined tests can be run in a backend-agnostic way by calling
 `meson test` inside the build dir, or by using backend-specific
 commands, such as `ninja test` or `msbuild RUN_TESTS.vcxproj`.
diff --git a/mesonbuild/backend/backends.py b/mesonbuild/backend/backends.py index 506276708595..4d35d223d24b 100644 --- a/mesonbuild/backend/backends.py +++ b/mesonbuild/backend/backends.py @@ -84,7 +84,7 @@ def __init__(self, name, fname, cmd_args, env, is_cross, exe_wrapper, class TestSerialisation: def __init__(self, name, project, suite, fname, is_cross_built, exe_wrapper, is_parallel, - cmd_args, env, should_fail, timeout, workdir, extra_paths): + cmd_args, env, should_fail, timeout, workdir, extra_paths, protocol): self.name = name self.project_name = project self.suite = suite @@ -100,6 +100,7 @@ def __init__(self, name, project, suite, fname, is_cross_built, exe_wrapper, is_ self.timeout = timeout self.workdir = workdir self.extra_paths = extra_paths + self.protocol = protocol class OptionProxy: def __init__(self, name, value): @@ -756,7 +757,7 @@ def create_test_serialisation(self, tests): raise MesonException('Bad object in test command.') ts = TestSerialisation(t.get_name(), t.project_name, t.suite, cmd, is_cross, exe_wrapper, t.is_parallel, cmd_args, t.env, - t.should_fail, t.timeout, t.workdir, extra_paths) + t.should_fail, t.timeout, t.workdir, extra_paths, t.protocol) arr.append(ts) return arr diff --git a/mesonbuild/interpreter.py b/mesonbuild/interpreter.py index 5f19bc5db156..01597268dee0 100644 --- a/mesonbuild/interpreter.py +++ b/mesonbuild/interpreter.py @@ -850,7 +850,7 @@ def __repr__(self): class Test(InterpreterObject): def __init__(self, name, project, suite, exe, depends, is_parallel, - cmd_args, env, should_fail, timeout, workdir): + cmd_args, env, should_fail, timeout, workdir, protocol): InterpreterObject.__init__(self) self.name = name self.suite = suite @@ -863,6 +863,7 @@ def __init__(self, name, project, suite, exe, depends, is_parallel, self.should_fail = should_fail self.timeout = timeout self.workdir = workdir + self.protocol = protocol def get_exe(self): return self.exe @@ -1973,7 +1974,8 @@ def get_cross_property_method(self, args, kwargs): 'library': known_library_kwargs, 'subdir': {'if_found'}, 'subproject': {'version', 'default_options', 'required'}, - 'test': {'args', 'depends', 'env', 'is_parallel', 'should_fail', 'timeout', 'workdir', 'suite'}, + 'test': {'args', 'depends', 'env', 'is_parallel', 'should_fail', 'timeout', 'workdir', + 'suite', 'protocol'}, 'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'}, } @@ -3269,6 +3271,9 @@ def add_test(self, node, args, kwargs, is_base_test): workdir = None if not isinstance(timeout, int): raise InterpreterException('Timeout must be an integer.') + protocol = kwargs.get('protocol', 'exitcode') + if protocol not in ('exitcode',): + raise InterpreterException('Protocol must be "exitcode".') suite = [] prj = self.subproject if self.is_subproject() else self.build.project_name for s in mesonlib.stringlistify(kwargs.get('suite', '')): @@ -3280,7 +3285,7 @@ def add_test(self, node, args, kwargs, is_base_test): if not isinstance(dep, (build.CustomTarget, build.BuildTarget)): raise InterpreterException('Depends items must be build targets.') t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args, - env, should_fail, timeout, workdir) + env, should_fail, timeout, workdir, protocol) if is_base_test: self.build.tests.append(t) mlog.debug('Adding test', mlog.bold(args[0], True)) From 1264f03637b3a0b908913e8d4b064d3a2925a4ef Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 21 Feb 2019 11:25:08 +0100 Subject: [PATCH 2/5] mtest: refactor TestRun creation Parse the error code 
outside SingleTestRunner's run() method. This will let us add TAP support without complicating that long method further. --- mesonbuild/mtest.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py index 6536558de259..74ddd656a151 100644 --- a/mesonbuild/mtest.py +++ b/mesonbuild/mtest.py @@ -149,8 +149,17 @@ class TestResult(enum.Enum): class TestRun: - def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd, - env): + @staticmethod + def make_exitcode(test, returncode, duration, stdo, stde, cmd): + if returncode == GNU_SKIP_RETURNCODE: + res = TestResult.SKIP + elif test.should_fail: + res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS + else: + res = TestResult.FAIL if bool(returncode) else TestResult.OK + return TestRun(test, res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env) + + def __init__(self, test, res, returncode, duration, stdo, stde, cmd): assert isinstance(res, TestResult) self.res = res self.returncode = returncode @@ -158,8 +167,8 @@ def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd, self.stdo = stdo self.stde = stde self.cmd = cmd - self.env = env - self.should_fail = should_fail + self.env = test.env + self.should_fail = test.should_fail def get_log(self): res = '--- command ---\n' @@ -257,9 +266,8 @@ def run(self): cmd = self._get_cmd() if cmd is None: skip_stdout = 'Not run because can not execute cross compiled binaries.' - return TestRun(res=TestResult.SKIP, returncode=GNU_SKIP_RETURNCODE, - should_fail=self.test.should_fail, duration=0.0, - stdo=skip_stdout, stde=None, cmd=None, env=self.test.env) + return TestRun(test=self.test, res=TestResult.SKIP, returncode=GNU_SKIP_RETURNCODE, + duration=0.0, stdo=skip_stdout, stde=None, cmd=None) else: wrap = TestHarness.get_wrapper(self.options) if self.options.gdb: @@ -388,14 +396,9 @@ def preexec_fn(): stdo = "" stde = additional_error if timed_out: - res = TestResult.TIMEOUT - elif p.returncode == GNU_SKIP_RETURNCODE: - res = TestResult.SKIP - elif self.test.should_fail: - res = TestResult.EXPECTEDFAIL if bool(p.returncode) else TestResult.UNEXPECTEDPASS + return TestRun(self.test, TestResult.TIMEOUT, p.returncode, duration, stdo, stde, cmd) else: - res = TestResult.FAIL if bool(p.returncode) else TestResult.OK - return TestRun(res, p.returncode, self.test.should_fail, duration, stdo, stde, cmd, self.test.env) + return TestRun.make_exitcode(self.test, p.returncode, duration, stdo, stde, cmd) class TestHarness: From d830945224cf6d109189da03e924d2dffc6214cd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 21 Feb 2019 16:58:19 +0100 Subject: [PATCH 3/5] mtest: do not use return code to look for failed tests --print-errorlogs is using the test's return code to look for failed tests, instead of just looking at the TestResult. Simplify the code and make it work for TAP too. 
---
 mesonbuild/mtest.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 74ddd656a151..57b4a1211e45 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -496,9 +496,10 @@ def print_stats(self, numlen, tests, name, result, i):
             (num, name, padding1, result.res.value, padding2, result.duration, status)
         ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
+        bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS)
         if not self.options.quiet or result.res not in ok_statuses:
             if result.res not in ok_statuses and mlog.colorize_console:
-                if result.res in (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS):
+                if result.res in bad_statuses:
                     decorator = mlog.red
                 elif result.res is TestResult.SKIP:
                     decorator = mlog.yellow
@@ -508,8 +509,7 @@
                 else:
                     print(result_str)
         result_str += "\n\n" + result.get_log()
-        if (result.returncode != GNU_SKIP_RETURNCODE) \
-                and (result.returncode != 0) != result.should_fail:
+        if result.res in bad_statuses:
             if self.options.print_errorlogs:
                 self.collected_logs.append(result_str)
         if self.logfile:

From f2e513791e56886a145a8e72854841b9f9122ca6 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Thu, 21 Feb 2019 17:51:10 +0100
Subject: [PATCH 4/5] mtest: add support for hard errors

Hard errors also come from the GNU Automake test protocol. They happen
when, for example, the set-up of a test case scenario fails, or when
some other unexpected or highly undesirable condition is encountered.
TAP will use them for parse errors too. Add them to the exitcode
protocol first.
---
 docs/markdown/Unit-tests.md                      |  4 +++-
 mesonbuild/mtest.py                              | 14 +++++++++++---
 test cases/failing test/4 hard error/main.c      |  3 +++
 test cases/failing test/4 hard error/meson.build |  4 ++++
 4 files changed, 21 insertions(+), 4 deletions(-)
 create mode 100644 test cases/failing test/4 hard error/main.c
 create mode 100644 test cases/failing test/4 hard error/meson.build

diff --git a/docs/markdown/Unit-tests.md b/docs/markdown/Unit-tests.md
index a8e72737cb23..9148bd5a04ba 100644
--- a/docs/markdown/Unit-tests.md
+++ b/docs/markdown/Unit-tests.md
@@ -51,10 +51,12 @@ By default Meson uses as many concurrent processes as there are cores on the tes
 $ MESON_TESTTHREADS=5 ninja test
 ```
 
-## Skipped tests
+## Skipped tests and hard errors
 
 Sometimes a test can only determine at runtime that it can not be run. The GNU standard approach in this case is to exit the program with error code 77. Meson will detect this and report these tests as skipped rather than failed. This behavior was added in version 0.37.0.
 
+In addition, sometimes a test fails so badly during set-up that its failure should be reported even if the test is marked as an expected failure. The GNU standard approach in this case is to exit the program with error code 99. Again, Meson will detect this and report these tests as `ERROR`, ignoring the setting of `should_fail`. This behavior was added in version 0.50.0.
+
 ## Testing tool
 
 The goal of the meson test tool is to provide a simple way to run tests in a variety of different ways. The tool is designed to be run in the build directory.
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 57b4a1211e45..21e54032bc5d 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -36,6 +36,10 @@
 # mean that the test should be skipped.
 GNU_SKIP_RETURNCODE = 77
 
+# GNU autotools interprets a return code of 99 from tests it executes to
+# mean that the test failed even before testing what it is supposed to test.
+GNU_ERROR_RETURNCODE = 99
+
 def is_windows():
     platname = platform.system().lower()
     return platname == 'windows' or 'mingw' in platname
@@ -146,6 +150,7 @@ class TestResult(enum.Enum):
     FAIL = 'FAIL'
     EXPECTEDFAIL = 'EXPECTEDFAIL'
     UNEXPECTEDPASS = 'UNEXPECTEDPASS'
+    ERROR = 'ERROR'
 
 
 class TestRun:
@@ -153,11 +158,13 @@
     def make_exitcode(test, returncode, duration, stdo, stde, cmd):
         if returncode == GNU_SKIP_RETURNCODE:
             res = TestResult.SKIP
+        elif returncode == GNU_ERROR_RETURNCODE:
+            res = TestResult.ERROR
         elif test.should_fail:
             res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
         else:
             res = TestResult.FAIL if bool(returncode) else TestResult.OK
-        return TestRun(test, res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+        return TestRun(test, res, returncode, duration, stdo, stde, cmd)
 
     def __init__(self, test, res, returncode, duration, stdo, stde, cmd):
         assert isinstance(res, TestResult)
@@ -474,7 +481,7 @@ def process_test_result(self, result):
             self.skip_count += 1
         elif result.res is TestResult.OK:
             self.success_count += 1
-        elif result.res is TestResult.FAIL:
+        elif result.res is TestResult.FAIL or result.res is TestResult.ERROR:
             self.fail_count += 1
         elif result.res is TestResult.EXPECTEDFAIL:
             self.expectedfail_count += 1
@@ -496,7 +503,8 @@ def print_stats(self, numlen, tests, name, result, i):
             (num, name, padding1, result.res.value, padding2, result.duration, status)
         ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
-        bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS)
+        bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS,
+                        TestResult.ERROR)
         if not self.options.quiet or result.res not in ok_statuses:
             if result.res not in ok_statuses and mlog.colorize_console:
                 if result.res in bad_statuses:
diff --git a/test cases/failing test/4 hard error/main.c b/test cases/failing test/4 hard error/main.c
new file mode 100644
index 000000000000..a1e705ade272
--- /dev/null
+++ b/test cases/failing test/4 hard error/main.c
@@ -0,0 +1,3 @@
+int main(void) {
+    return 99;
+}
diff --git a/test cases/failing test/4 hard error/meson.build b/test cases/failing test/4 hard error/meson.build
new file mode 100644
index 000000000000..6979b0416934
--- /dev/null
+++ b/test cases/failing test/4 hard error/meson.build
@@ -0,0 +1,4 @@
+project('trivial', 'c')
+
+# Exit code 99 even overrides should_fail
+test('My Test', executable('main', 'main.c'), should_fail: true)

From 91f847d308b57adec89245308b60ae063026b456 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 27 Feb 2019 07:25:33 +0100
Subject: [PATCH 5/5] mtest: implement TAP parsing

This provides initial support for parsing TAP output. It detects
failures and skipped tests without relying on the exit code, as well as
early termination of the test due to an error or a crash.

For now, subtests are not recorded in the TestRun object. However,
because the TAP output goes on stdout, it is printed by
--print-errorlogs when a test does not behave as expected. Handling
subtests as TestRuns, and serializing them to JSON, can be added later.

The parser was written specifically for Meson, and comes with its own
test suite.

Fixes #2923.
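As a quick illustration of what the parser consumes (a sketch only, assuming it
is run from a Meson checkout so that `mesonbuild.mtest` is importable; the
sample TAP text is made up), it can be driven the same way the new unit tests
below drive it:

```python
import io
from mesonbuild.mtest import TAPParser

# A made-up TAP stream: a version line, a plan, one pass and one expected failure.
sample = 'TAP version 13\n1..2\nok 1 first\nnot ok 2 second # TODO fix later\n'

# parse() is a generator yielding Version, Plan, Test, Bailout and Error events.
for event in TAPParser(io.StringIO(sample)).parse():
    if isinstance(event, TAPParser.Test):
        # event.result is a TestResult: OK, FAIL, SKIP, EXPECTEDFAIL or UNEXPECTEDPASS.
        print(event.number, event.name, event.result, event.explanation)
    elif isinstance(event, TAPParser.Error):
        print('TAP parse error:', event.message)
```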
---
 docs/markdown/Reference-manual.md               |  10 +-
 docs/markdown/Unit-tests.md                     |   6 +-
 mesonbuild/interpreter.py                       |   4 +-
 mesonbuild/mtest.py                             | 187 +++++++++++-
 run_unittests.py                                | 271 ++++++++++++++++++
 test cases/common/212 tap tests/meson.build     |  10 +
 test cases/common/212 tap tests/tester.c        |  10 +
 .../failing test/5 tap tests/meson.build        |   6 +
 test cases/failing test/5 tap tests/tester.c    |  10 +
 9 files changed, 505 insertions(+), 9 deletions(-)
 create mode 100644 test cases/common/212 tap tests/meson.build
 create mode 100644 test cases/common/212 tap tests/tester.c
 create mode 100644 test cases/failing test/5 tap tests/meson.build
 create mode 100644 test cases/failing test/5 tap tests/tester.c

diff --git a/docs/markdown/Reference-manual.md b/docs/markdown/Reference-manual.md
index 412135feff9b..f2b0416d326c 100644
--- a/docs/markdown/Reference-manual.md
+++ b/docs/markdown/Reference-manual.md
@@ -1443,11 +1443,11 @@ Keyword arguments are the following:
   before test is executed even if they have
   `build_by_default : false`. Since 0.46.0
 
-- `protocol` specifies how the test results are parsed. For now
-  it must be `exitcode`, that is the executable's exit code is used
-  by the test harness to record the outcome of the test. For example
-  an exit code of zero indicates success. For more on the Meson test harness
-  protocol read [Unit Tests](Unit-tests.md). Since 0.50.0
+- `protocol` specifies how the test results are parsed and can be one
+  of `exitcode` (the executable's exit code is used by the test harness
+  to record the outcome of the test) or `tap` ([Test Anything
+  Protocol](https://www.testanything.org/)). For more on the Meson test
+  harness protocol read [Unit Tests](Unit-tests.md). Since 0.50.0
 
 Defined tests can be run in a backend-agnostic way by calling
 `meson test` inside the build dir, or by using backend-specific
 commands, such as `ninja test` or `msbuild RUN_TESTS.vcxproj`.
diff --git a/docs/markdown/Unit-tests.md b/docs/markdown/Unit-tests.md
index 9148bd5a04ba..9e617391099f 100644
--- a/docs/markdown/Unit-tests.md
+++ b/docs/markdown/Unit-tests.md
@@ -53,7 +53,11 @@ $ MESON_TESTTHREADS=5 ninja test
 ```
 
 ## Skipped tests and hard errors
 
-Sometimes a test can only determine at runtime that it can not be run. The GNU standard approach in this case is to exit the program with error code 77. Meson will detect this and report these tests as skipped rather than failed. This behavior was added in version 0.37.0.
+Sometimes a test can only determine at runtime that it can not be run.
+
+For the default `exitcode` testing protocol, the GNU standard approach in this case is to exit the program with error code 77. Meson will detect this and report these tests as skipped rather than failed. This behavior was added in version 0.37.0.
+
+For TAP-based tests, skipped tests should print a single line starting with `1..0 # SKIP`.
 
 In addition, sometimes a test fails so badly during set-up that its failure should be reported even if the test is marked as an expected failure. The GNU standard approach in this case is to exit the program with error code 99. Again, Meson will detect this and report these tests as `ERROR`, ignoring the setting of `should_fail`. This behavior was added in version 0.50.0.
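As an aside, the exit-code conventions documented in the hunk above are easy to
exercise from a stand-alone test program. A minimal sketch (a hypothetical file,
not part of this patch series; the probed tool and the scratch-directory check
are arbitrary placeholders):

```python
#!/usr/bin/env python3
# Hypothetical test program following the exitcode-protocol conventions above.
import os
import shutil
import sys
import tempfile

# Missing prerequisite: exit 77, which Meson reports as SKIP.
if shutil.which('sh') is None:
    sys.exit(77)

# Broken set-up: exit 99, which Meson reports as ERROR even with should_fail: true.
try:
    scratch = tempfile.mkdtemp()
except OSError:
    sys.exit(99)

# The actual check; exit 0 for success, anything else for failure.
sys.exit(0 if os.path.isdir(scratch) else 1)
```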
diff --git a/mesonbuild/interpreter.py b/mesonbuild/interpreter.py index 01597268dee0..8bde727a4968 100644 --- a/mesonbuild/interpreter.py +++ b/mesonbuild/interpreter.py @@ -3272,8 +3272,8 @@ def add_test(self, node, args, kwargs, is_base_test): if not isinstance(timeout, int): raise InterpreterException('Timeout must be an integer.') protocol = kwargs.get('protocol', 'exitcode') - if protocol not in ('exitcode',): - raise InterpreterException('Protocol must be "exitcode".') + if protocol not in ('exitcode', 'tap'): + raise InterpreterException('Protocol must be "exitcode" or "tap".') suite = [] prj = self.subproject if self.is_subproject() else self.build.project_name for s in mesonlib.stringlistify(kwargs.get('suite', '')): diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py index 21e54032bc5d..02b728e45b69 100644 --- a/mesonbuild/mtest.py +++ b/mesonbuild/mtest.py @@ -23,6 +23,9 @@ from mesonbuild.mesonlib import substring_is_in_list, MesonException from mesonbuild import mlog +from collections import namedtuple +import io +import re import tempfile import time, datetime, multiprocessing, json import concurrent.futures as conc @@ -153,6 +156,150 @@ class TestResult(enum.Enum): ERROR = 'ERROR' +class TAPParser(object): + Plan = namedtuple('Plan', ['count', 'late', 'skipped', 'explanation']) + Bailout = namedtuple('Bailout', ['message']) + Test = namedtuple('Test', ['number', 'name', 'result', 'explanation']) + Error = namedtuple('Error', ['message']) + Version = namedtuple('Version', ['version']) + + _MAIN = 1 + _AFTER_TEST = 2 + _YAML = 3 + + _RE_BAILOUT = r'Bail out!\s*(.*)' + _RE_DIRECTIVE = r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?' + _RE_PLAN = r'1\.\.([0-9]+)' + _RE_DIRECTIVE + _RE_TEST = r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE + _RE_VERSION = r'TAP version ([0-9]+)' + _RE_YAML_START = r'(\s+)---.*' + _RE_YAML_END = r'\s+\.\.\.\s*' + + def __init__(self, io): + self.io = io + + def parse_test(self, ok, num, name, directive, explanation): + name = name.strip() + explanation = explanation.strip() if explanation else None + if directive is not None: + directive = directive.upper() + if directive == 'SKIP': + if ok: + yield self.Test(num, name, TestResult.SKIP, explanation) + return + elif directive == 'TODO': + yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation) + return + else: + yield self.Error('invalid directive "%s"' % (directive,)) + + yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation) + + def parse(self): + found_late_test = False + bailed_out = False + plan = None + lineno = 0 + num_tests = 0 + yaml_lineno = None + yaml_indent = None + state = self._MAIN + version = 12 + while True: + lineno += 1 + try: + line = next(self.io).rstrip() + except StopIteration: + break + + # YAML blocks are only accepted after a test + if state == self._AFTER_TEST: + if version >= 13: + m = re.match(self._RE_YAML_START, line) + if m: + state = self._YAML + yaml_lineno = lineno + yaml_indent = m.group(1) + continue + state = self._MAIN + + elif state == self._YAML: + if re.match(self._RE_YAML_END, line): + state = self._MAIN + continue + if line.startswith(yaml_indent): + continue + yield self.Error('YAML block not terminated (started on line %d)' % (yaml_lineno,)) + state = self._MAIN + + assert state == self._MAIN + if line.startswith('#'): + continue + + m = re.match(self._RE_TEST, line) + if m: + if plan and plan.late and not found_late_test: + yield 
self.Error('unexpected test after late plan') + found_late_test = True + num_tests += 1 + num = num_tests if m.group(2) is None else int(m.group(2)) + if num != num_tests: + yield self.Error('out of order test numbers') + yield from self.parse_test(m.group(1) == 'ok', num, + m.group(3), m.group(4), m.group(5)) + state = self._AFTER_TEST + continue + + m = re.match(self._RE_PLAN, line) + if m: + if plan: + yield self.Error('more than one plan found') + else: + count = int(m.group(1)) + skipped = (count == 0) + if m.group(2): + if m.group(2).upper().startswith('SKIP'): + if count > 0: + yield self.Error('invalid SKIP directive for plan') + skipped = True + else: + yield self.Error('invalid directive for plan') + plan = self.Plan(count=count, late=(num_tests > 0), + skipped=skipped, explanation=m.group(3)) + yield plan + continue + + m = re.match(self._RE_BAILOUT, line) + if m: + yield self.Bailout(m.group(1)) + bailed_out = True + continue + + m = re.match(self._RE_VERSION, line) + if m: + # The TAP version is only accepted as the first line + if lineno != 1: + yield self.Error('version number must be on the first line') + continue + version = int(m.group(1)) + if version < 13: + yield self.Error('version number should be at least 13') + else: + yield self.Version(version=version) + continue + + yield self.Error('unexpected input at line %d' % (lineno,)) + + if state == self._YAML: + yield self.Error('YAML block not terminated (started on line %d)' % (yaml_lineno,)) + + if not bailed_out and plan and num_tests != plan.count: + if num_tests < plan.count: + yield self.Error('Too few tests run (expected %d, got %d)' % (plan.count, num_tests)) + else: + yield self.Error('Too many tests run (expected %d, got %d)' % (plan.count, num_tests)) + + class TestRun: @staticmethod def make_exitcode(test, returncode, duration, stdo, stde, cmd): @@ -166,6 +313,41 @@ def make_exitcode(test, returncode, duration, stdo, stde, cmd): res = TestResult.FAIL if bool(returncode) else TestResult.OK return TestRun(test, res, returncode, duration, stdo, stde, cmd) + def make_tap(test, returncode, duration, stdo, stde, cmd): + res = None + num_tests = 0 + failed = False + num_skipped = 0 + + for i in TAPParser(io.StringIO(stdo)).parse(): + if isinstance(i, TAPParser.Bailout): + res = TestResult.ERROR + elif isinstance(i, TAPParser.Test): + if i.result == TestResult.SKIP: + num_skipped += 1 + elif i.result in (TestResult.FAIL, TestResult.UNEXPECTEDPASS): + failed = True + num_tests += 1 + elif isinstance(i, TAPParser.Error): + res = TestResult.ERROR + stde += '\nTAP parsing error: ' + i.message + + if returncode != 0: + res = TestResult.ERROR + stde += '\n(test program exited with status code %d)' % (returncode,) + + if res is None: + # Now determine the overall result of the test based on the outcome of the subcases + if num_skipped == num_tests: + # This includes the case where num_tests is zero + res = TestResult.SKIP + elif test.should_fail: + res = TestResult.EXPECTEDFAIL if failed else TestResult.UNEXPECTEDPASS + else: + res = TestResult.FAIL if failed else TestResult.OK + + return TestRun(test, res, returncode, duration, stdo, stde, cmd) + def __init__(self, test, res, returncode, duration, stdo, stde, cmd): assert isinstance(res, TestResult) self.res = res @@ -405,7 +587,10 @@ def preexec_fn(): if timed_out: return TestRun(self.test, TestResult.TIMEOUT, p.returncode, duration, stdo, stde, cmd) else: - return TestRun.make_exitcode(self.test, p.returncode, duration, stdo, stde, cmd) + if self.test.protocol == 
'exitcode': + return TestRun.make_exitcode(self.test, p.returncode, duration, stdo, stde, cmd) + else: + return TestRun.make_tap(self.test, p.returncode, duration, stdo, stde, cmd) class TestHarness: diff --git a/run_unittests.py b/run_unittests.py index 32c7875fb5b5..e13903a657be 100755 --- a/run_unittests.py +++ b/run_unittests.py @@ -27,6 +27,7 @@ import platform import pickle import functools +import io from itertools import chain from unittest import mock from configparser import ConfigParser @@ -54,6 +55,8 @@ from mesonbuild.build import Target import mesonbuild.modules.pkgconfig +from mesonbuild.mtest import TAPParser, TestResult + from run_tests import ( Backend, FakeBuild, FakeCompilerOptions, ensure_backend_detects_changes, exe_suffix, get_backend_commands, @@ -5728,6 +5731,272 @@ def test_cross_file_dirs_overriden(self): '-Ddef_sysconfdir=sysconfbar']) +class TAPParserTests(unittest.TestCase): + def assert_test(self, events, **kwargs): + if 'explanation' not in kwargs: + kwargs['explanation'] = None + self.assertEqual(next(events), TAPParser.Test(**kwargs)) + + def assert_plan(self, events, **kwargs): + if 'skipped' not in kwargs: + kwargs['skipped'] = False + if 'explanation' not in kwargs: + kwargs['explanation'] = None + self.assertEqual(next(events), TAPParser.Plan(**kwargs)) + + def assert_version(self, events, **kwargs): + self.assertEqual(next(events), TAPParser.Version(**kwargs)) + + def assert_error(self, events): + self.assertEqual(type(next(events)), TAPParser.Error) + + def assert_bailout(self, events, **kwargs): + self.assertEqual(next(events), TAPParser.Bailout(**kwargs)) + + def assert_last(self, events): + with self.assertRaises(StopIteration): + next(events) + + def parse_tap(self, s): + parser = TAPParser(io.StringIO(s)) + return iter(parser.parse()) + + def parse_tap_v13(self, s): + events = self.parse_tap('TAP version 13\n' + s) + self.assert_version(events, version=13) + return events + + def test_empty(self): + events = self.parse_tap('') + self.assert_last(events) + + def test_empty_plan(self): + events = self.parse_tap('1..0') + self.assert_plan(events, count=0, late=False, skipped=True) + self.assert_last(events) + + def test_plan_directive(self): + events = self.parse_tap('1..0 # skipped for some reason') + self.assert_plan(events, count=0, late=False, skipped=True, + explanation='for some reason') + self.assert_last(events) + + events = self.parse_tap('1..1 # skipped for some reason\nok 1') + self.assert_error(events) + self.assert_plan(events, count=1, late=False, skipped=True, + explanation='for some reason') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + events = self.parse_tap('1..1 # todo not supported here\nok 1') + self.assert_error(events) + self.assert_plan(events, count=1, late=False, skipped=False, + explanation='not supported here') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_one_test_ok(self): + events = self.parse_tap('ok') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_one_test_with_number(self): + events = self.parse_tap('ok 1') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_one_test_with_name(self): + events = self.parse_tap('ok 1 abc') + self.assert_test(events, number=1, name='abc', result=TestResult.OK) + self.assert_last(events) + + def test_one_test_not_ok(self): + events = self.parse_tap('not 
ok') + self.assert_test(events, number=1, name='', result=TestResult.FAIL) + self.assert_last(events) + + def test_one_test_todo(self): + events = self.parse_tap('not ok 1 abc # TODO') + self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL) + self.assert_last(events) + + events = self.parse_tap('ok 1 abc # TODO') + self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS) + self.assert_last(events) + + def test_one_test_skip(self): + events = self.parse_tap('ok 1 abc # SKIP') + self.assert_test(events, number=1, name='abc', result=TestResult.SKIP) + self.assert_last(events) + + def test_one_test_skip_failure(self): + events = self.parse_tap('not ok 1 abc # SKIP') + self.assert_test(events, number=1, name='abc', result=TestResult.FAIL) + self.assert_last(events) + + def test_many_early_plan(self): + events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4') + self.assert_plan(events, count=4, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_test(events, number=3, name='', result=TestResult.OK) + self.assert_test(events, number=4, name='', result=TestResult.FAIL) + self.assert_last(events) + + def test_many_late_plan(self): + events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_test(events, number=3, name='', result=TestResult.OK) + self.assert_test(events, number=4, name='', result=TestResult.FAIL) + self.assert_plan(events, count=4, late=True) + self.assert_last(events) + + def test_directive_case(self): + events = self.parse_tap('ok 1 abc # skip') + self.assert_test(events, number=1, name='abc', result=TestResult.SKIP) + self.assert_last(events) + + events = self.parse_tap('ok 1 abc # ToDo') + self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS) + self.assert_last(events) + + def test_directive_explanation(self): + events = self.parse_tap('ok 1 abc # skip why') + self.assert_test(events, number=1, name='abc', result=TestResult.SKIP, + explanation='why') + self.assert_last(events) + + events = self.parse_tap('ok 1 abc # ToDo Because') + self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS, + explanation='Because') + self.assert_last(events) + + def test_one_test_early_plan(self): + events = self.parse_tap('1..1\nok') + self.assert_plan(events, count=1, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_one_test_late_plan(self): + events = self.parse_tap('ok\n1..1') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_plan(events, count=1, late=True) + self.assert_last(events) + + def test_out_of_order(self): + events = self.parse_tap('ok 2') + self.assert_error(events) + self.assert_test(events, number=2, name='', result=TestResult.OK) + self.assert_last(events) + + def test_middle_plan(self): + events = self.parse_tap('ok 1\n1..2\nok 2') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_plan(events, count=2, late=True) + self.assert_error(events) + self.assert_test(events, number=2, name='', result=TestResult.OK) + self.assert_last(events) + + def test_too_many_plans(self): + events = self.parse_tap('1..1\n1..2\nok 1') + self.assert_plan(events, count=1, late=False) + 
self.assert_error(events) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_too_many(self): + events = self.parse_tap('ok 1\nnot ok 2\n1..1') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_plan(events, count=1, late=True) + self.assert_error(events) + self.assert_last(events) + + events = self.parse_tap('1..1\nok 1\nnot ok 2') + self.assert_plan(events, count=1, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_error(events) + self.assert_last(events) + + def test_too_few(self): + events = self.parse_tap('ok 1\nnot ok 2\n1..3') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_plan(events, count=3, late=True) + self.assert_error(events) + self.assert_last(events) + + events = self.parse_tap('1..3\nok 1\nnot ok 2') + self.assert_plan(events, count=3, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_error(events) + self.assert_last(events) + + def test_too_few_bailout(self): + events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test') + self.assert_plan(events, count=3, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_bailout(events, message='no third test') + self.assert_last(events) + + def test_diagnostics(self): + events = self.parse_tap('1..1\n# ignored\nok 1') + self.assert_plan(events, count=1, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too') + self.assert_plan(events, count=1, late=False) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_plan(events, count=1, late=True) + self.assert_last(events) + + def test_unexpected(self): + events = self.parse_tap('1..1\ninvalid\nok 1') + self.assert_plan(events, count=1, late=False) + self.assert_error(events) + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_last(events) + + def test_version(self): + events = self.parse_tap('TAP version 13\n') + self.assert_version(events, version=13) + self.assert_last(events) + + events = self.parse_tap('TAP version 12\n') + self.assert_error(events) + self.assert_last(events) + + events = self.parse_tap('1..0\nTAP version 13\n') + self.assert_plan(events, count=0, late=False, skipped=True) + self.assert_error(events) + self.assert_last(events) + + def test_yaml(self): + events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_test(events, number=2, name='', result=TestResult.OK) + self.assert_last(events) + + events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_error(events) + self.assert_last(events) + + events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: 
def\nnot ok 2') + self.assert_test(events, number=1, name='', result=TestResult.OK) + self.assert_error(events) + self.assert_test(events, number=2, name='', result=TestResult.FAIL) + self.assert_last(events) + def unset_envs(): # For unit tests we must fully control all command lines # so that there are no unexpected changes coming from the @@ -5741,6 +6010,8 @@ def main(): unset_envs() cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests', 'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests', + 'TAPParserTests', + 'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests', 'WindowsTests', 'DarwinTests'] diff --git a/test cases/common/212 tap tests/meson.build b/test cases/common/212 tap tests/meson.build new file mode 100644 index 000000000000..58529a72ebcd --- /dev/null +++ b/test cases/common/212 tap tests/meson.build @@ -0,0 +1,10 @@ +project('test features', 'c') + +tester = executable('tester', 'tester.c') +test('pass', tester, args : ['ok'], protocol: 'tap') +test('fail', tester, args : ['not ok'], should_fail: true, protocol: 'tap') +test('xfail', tester, args : ['not ok # todo'], protocol: 'tap') +test('xpass', tester, args : ['ok # todo'], should_fail: true, protocol: 'tap') +test('skip', tester, args : ['ok # skip'], protocol: 'tap') +test('skip failure', tester, args : ['not ok # skip'], should_fail: true, protocol: 'tap') +test('no tests', tester, args : ['1..0 # skip'], protocol: 'tap') diff --git a/test cases/common/212 tap tests/tester.c b/test cases/common/212 tap tests/tester.c new file mode 100644 index 000000000000..ac582e7c0130 --- /dev/null +++ b/test cases/common/212 tap tests/tester.c @@ -0,0 +1,10 @@ +#include + +int main(int argc, char **argv) { + if (argc != 2) { + fprintf(stderr, "Incorrect number of arguments, got %i\n", argc); + return 1; + } + puts(argv[1]); + return 0; +} diff --git a/test cases/failing test/5 tap tests/meson.build b/test cases/failing test/5 tap tests/meson.build new file mode 100644 index 000000000000..844c1f99009f --- /dev/null +++ b/test cases/failing test/5 tap tests/meson.build @@ -0,0 +1,6 @@ +project('test features', 'c') + +tester = executable('tester', 'tester.c') +test('nonzero return code', tester, args : [], protocol: 'tap') +test('missing test', tester, args : ['1..1'], protocol: 'tap') +test('incorrect skip', tester, args : ['1..1 # skip\nok 1'], protocol: 'tap') diff --git a/test cases/failing test/5 tap tests/tester.c b/test cases/failing test/5 tap tests/tester.c new file mode 100644 index 000000000000..ac582e7c0130 --- /dev/null +++ b/test cases/failing test/5 tap tests/tester.c @@ -0,0 +1,10 @@ +#include + +int main(int argc, char **argv) { + if (argc != 2) { + fprintf(stderr, "Incorrect number of arguments, got %i\n", argc); + return 1; + } + puts(argv[1]); + return 0; +}
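For completeness, a test registered with `protocol: 'tap'` only needs to write
TAP on its stdout; the C testers above just echo their argument, but a test
could equally produce the output itself. A minimal sketch of such a test
program (hypothetical, and in Python rather than C for brevity):

```python
#!/usr/bin/env python3
# Hypothetical TAP-emitting test: with protocol: 'tap', Meson parses this output
# instead of judging the test by its exit code alone.
import os

print('1..4')  # plan: four sub-tests
print('ok 1 arithmetic' if 1 + 1 == 2 else 'not ok 1 arithmetic')
print('ok 2 environment' if 'PATH' in os.environ else 'not ok 2 environment')
print('ok 3 optional feature # SKIP not built')         # counted as a skip
print('not ok 4 known regression # TODO fix upstream')  # counted as an expected failure
```

With the parser above, the first two sub-tests come out as OK or FAIL, the
`# SKIP` and `# TODO` directives map to SKIP and EXPECTEDFAIL, and a non-zero
exit code, a `Bail out!` line or malformed output turns the whole test into an
ERROR.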