From df9e38ec2f99ba45a4c8c7079bb03001123eab92 Mon Sep 17 00:00:00 2001 From: Dirk Thomas Date: Mon, 30 Jan 2017 10:27:54 -0800 Subject: [PATCH] add skipped / disabled tests to catkin_test_results summary (#848) * Fixes #839: Adding disabled tests to catkin_test_results summary. * maintain public API --- bin/catkin_test_results | 7 ++- python/catkin/test_results.py | 83 ++++++++++++++++++++++------ test/unit_tests/test_test_results.py | 31 ++++++++++- 3 files changed, 99 insertions(+), 22 deletions(-) diff --git a/bin/catkin_test_results b/bin/catkin_test_results index 95f1e26f0..8a1686e9b 100755 --- a/bin/catkin_test_results +++ b/bin/catkin_test_results @@ -8,7 +8,7 @@ import sys # find the import relatively if available to work before installing catkin or overlaying installed version if os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'python', 'catkin', '__init__.py')): sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'python')) -from catkin.test_results import aggregate_results, print_summary, test_results +from catkin.test_results import aggregate_results, print_summary2, test_results2 def main(): @@ -24,10 +24,11 @@ def main(): sys.exit('Test results directory "%s" does not exist' % test_results_dir) try: - results = test_results( + results = test_results2( test_results_dir, show_verbose=args.verbose, show_all=args.all) _, sum_errors, sum_failures = aggregate_results(results) - print_summary(results, show_stable=args.all) + print_summary2(results, show_stable=args.all) + # Skipped tests alone should not count as a failure if sum_errors or sum_failures: sys.exit(1) except Exception as e: diff --git a/python/catkin/test_results.py b/python/catkin/test_results.py index d81d4dbc1..1a92d2c2d 100644 --- a/python/catkin/test_results.py +++ b/python/catkin/test_results.py @@ -96,12 +96,18 @@ def _get_missing_junit_result_filename(filename): def read_junit(filename): + """Same as `read_junit2` except it doesn't return num_skipped.""" + num_tests, num_errors, num_failures, _ = read_junit2(filename) + return (num_tests, num_errors, num_failures) + + +def read_junit2(filename): """ parses xml file expected to follow junit/gtest conventions see http://code.google.com/p/googletest/wiki/AdvancedGuide#Generating_an_XML_Report :param filename: str junit xml file name - :returns: num_tests, num_errors, num_failures + :returns: num_tests, num_errors, num_failures, num_skipped :raises ParseError: if xml is not well-formed :raises IOError: if filename does not exist """ @@ -110,17 +116,29 @@ def read_junit(filename): num_tests = int(root.attrib['tests']) num_errors = int(root.attrib['errors']) num_failures = int(root.attrib['failures']) - return (num_tests, num_errors, num_failures) + num_skipped = int(root.get('skip', '0')) + int(root.get('disabled', '0')) + return (num_tests, num_errors, num_failures, num_skipped) def test_results(test_results_dir, show_verbose=False, show_all=False): + """Same as `test_results2` except the returned values don't include num_skipped.""" + results = {} + results2 = test_results2( + test_results_dir, show_verbose=show_verbose, show_all=show_all) + for name, values in results2.items(): + num_tests, num_errors, num_failures, _ = values + results[name] = (num_tests, num_errors, num_failures) + return results + + +def test_results2(test_results_dir, show_verbose=False, show_all=False): ''' Collects test results by parsing all xml files in given path, attempting to interpret them as junit results. 
:param test_results_dir: str foldername :param show_verbose: bool show output for tests which had errors or failed - :returns: dict {rel_path, (num_tests, num_errors, num_failures)} + :returns: dict {rel_path, (num_tests, num_errors, num_failures, num_skipped)} ''' results = {} for dirpath, dirnames, filenames in os.walk(test_results_dir): @@ -130,12 +148,12 @@ def test_results(test_results_dir, show_verbose=False, show_all=False): filename_abs = os.path.join(dirpath, filename) name = filename_abs[len(test_results_dir) + 1:] try: - num_tests, num_errors, num_failures = read_junit(filename_abs) + num_tests, num_errors, num_failures, num_skipped = read_junit2(filename_abs) except Exception as e: if show_all: print('Skipping "%s": %s' % (name, str(e))) continue - results[name] = (num_tests, num_errors, num_failures) + results[name] = (num_tests, num_errors, num_failures, num_skipped) if show_verbose and (num_errors + num_failures > 0): print("Full test results for '%s'" % (name)) print('-------------------------------------------------') @@ -146,35 +164,68 @@ def test_results(test_results_dir, show_verbose=False, show_all=False): def aggregate_results(results, callback_per_result=None): + """Same as `aggregate_results2` except it doesn't return num_skipped.""" + callback = None + if callback_per_result is not None: + def callback(name, num_tests, num_errors, num_failures, num_skipped): + callback_per_result(name, num_tests, num_errors, num_failures) + sum_tests, sum_errors, sum_failures, _ = aggregate_results2( + results, callback_per_result=callback) + return (sum_tests, sum_errors, sum_failures) + + +def aggregate_results2(results, callback_per_result=None): """ Aggregate results :param results: dict as from test_results() - :returns: tuple (num_tests, num_errors, num_failures) + :returns: tuple (num_tests, num_errors, num_failures, num_skipped) """ - sum_tests = sum_errors = sum_failures = 0 + sum_tests = sum_errors = sum_failures = sum_skipped = 0 for name in sorted(results.keys()): - (num_tests, num_errors, num_failures) = results[name] + (num_tests, num_errors, num_failures, num_skipped) = results[name] sum_tests += num_tests sum_errors += num_errors sum_failures += num_failures + sum_skipped += num_skipped if callback_per_result: - callback_per_result(name, num_tests, num_errors, num_failures) - return sum_tests, sum_errors, sum_failures + callback_per_result( + name, num_tests, num_errors, num_failures, num_skipped) + return sum_tests, sum_errors, sum_failures, sum_skipped def print_summary(results, show_stable=False, show_unstable=True): + """Same as `print_summary2` except it doesn't print skipped tests.""" + print_summary2( + results, show_stable=show_stable, show_unstable=show_unstable, + print_skipped=False) + + +def print_summary2(results, show_stable=False, show_unstable=True, print_skipped=True): """ print summary to stdout :param results: dict as from test_results() :param show_stable: print tests without failures extra :param show_stable: print tests with failures extra + :param print_skipped: include skipped tests in output """ - def callback(name, num_tests, num_errors, num_failures): - if show_stable and not num_errors and not num_failures: + def callback(name, num_tests, num_errors, num_failures, num_skipped): + if show_stable and not num_errors and not num_failures and not num_skipped: print('%s: %d tests' % (name, num_tests)) - if show_unstable and (num_errors or num_failures): - print('%s: %d tests, %d errors, %d failures' % (name, num_tests, num_errors, 
num_failures))
-    sum_tests, sum_errors, sum_failures = aggregate_results(results, callback)
-    print('Summary: %d tests, %d errors, %d failures' % (sum_tests, sum_errors, sum_failures))
+        if show_unstable and (num_errors or num_failures or num_skipped):
+            msg = '{}: {} tests, {} errors, {} failures'
+            msg_args = [name, num_tests, num_errors, num_failures]
+            if print_skipped:
+                msg += ', {} skipped'
+                msg_args.append(num_skipped)
+            print(msg.format(*msg_args))
+    sum_tests, sum_errors, sum_failures, sum_skipped = aggregate_results2(results, callback)
+
+    msg = 'Summary: {} tests, {} errors, {} failures'
+    msg_args = [sum_tests, sum_errors, sum_failures]
+    if print_skipped:
+        msg += ', {} skipped'
+        msg_args.append(sum_skipped)
+
+    print(msg.format(*msg_args))
diff --git a/test/unit_tests/test_test_results.py b/test/unit_tests/test_test_results.py
index 324c40d9d..10e772613 100644
--- a/test/unit_tests/test_test_results.py
+++ b/test/unit_tests/test_test_results.py
@@ -26,9 +26,25 @@ def test_read_junit(self):
 
             result_file = os.path.join(rootdir, 'test1.xml')
             with open(result_file, 'w') as fhand:
-                fhand.write('<testsuite tests="5" errors="1" failures="3"/>')
+                fhand.write('<testsuite tests="5" errors="1" failures="3" disabled="2"/>')
             (num_tests, num_errors, num_failures) = catkin_test_results.read_junit(result_file)
             self.assertEqual((5, 1, 3), (num_tests, num_errors, num_failures))
+            (num_tests, num_errors, num_failures, num_skipped) = catkin_test_results.read_junit2(result_file)
+            self.assertEqual((5, 1, 3, 2), (num_tests, num_errors, num_failures, num_skipped))
+        finally:
+            shutil.rmtree(rootdir)
+
+    def test_read_junit_skip(self):
+        try:
+            rootdir = tempfile.mkdtemp()
+
+            result_file = os.path.join(rootdir, 'test1.xml')
+            with open(result_file, 'w') as fhand:
+                fhand.write('<testsuite tests="5" errors="1" failures="3" skip="2"/>')
+            (num_tests, num_errors, num_failures) = catkin_test_results.read_junit(result_file)
+            self.assertEqual((5, 1, 3), (num_tests, num_errors, num_failures))
+            (num_tests, num_errors, num_failures, num_skipped) = catkin_test_results.read_junit2(result_file)
+            self.assertEqual((5, 1, 3, 2), (num_tests, num_errors, num_failures, num_skipped))
         finally:
             shutil.rmtree(rootdir)
 
@@ -39,9 +55,11 @@ def test_test_results(self):
             for filename in ['test1.xml', 'test2.xml', 'foo.bar']:
                 result_file = os.path.join(rootdir, filename)
                 with open(result_file, 'w') as fhand:
-                    fhand.write('<testsuite tests="5" errors="1" failures="3"/>')
+                    fhand.write('<testsuite tests="5" errors="1" failures="3" disabled="2"/>')
             results = catkin_test_results.test_results(rootdir)
             self.assertEqual({'test1.xml': (5, 1, 3), 'test2.xml': (5, 1, 3)}, results)
+            results = catkin_test_results.test_results2(rootdir)
+            self.assertEqual({'test1.xml': (5, 1, 3, 2), 'test2.xml': (5, 1, 3, 2)}, results)
         finally:
             shutil.rmtree(rootdir)
 
@@ -85,7 +103,7 @@ def test_test_results_detail_with_non_ascii(self):
         print(summary)
 
     def test_print_summary(self):
-        results = {'test1.xml': (5, 1, 3), 'test2.xml': (7, 2, 4)}
+        results = {'test1.xml': (5, 1, 3, 2), 'test2.xml': (7, 2, 4, 1)}
         try:
             oldstdout = sys.stdout
             sys.stdout = StringIO()
@@ -95,5 +113,12 @@ def test_print_summary(self):
             self.assertTrue('7 tests, 2 errors, 4 failures' in summary, summary)
             self.assertTrue('12 tests, 3 errors, 7 failures' in summary, summary)
 
+            sys.stdout = StringIO()
+            catkin_test_results.print_summary2(results)
+            summary = sys.stdout.getvalue()
+            self.assertTrue('5 tests, 1 errors, 3 failures, 2 skipped' in summary, summary)
+            self.assertTrue('7 tests, 2 errors, 4 failures, 1 skipped' in summary, summary)
+            self.assertTrue('12 tests, 3 errors, 7 failures, 3 skipped' in summary, summary)
+
         finally:
             sys.stdout = oldstdout
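
For reference, here is a minimal usage sketch (not part of the patch) of the new *2 API added above, assuming the patched catkin is on the Python path. The test results directory path is a placeholder, and the exit-code handling mirrors bin/catkin_test_results, where skipped or disabled tests alone do not cause a failure.

#!/usr/bin/env python
# Usage sketch only -- assumes this patch is applied; the directory path is hypothetical.
import sys

from catkin.test_results import aggregate_results2, print_summary2, test_results2

test_results_dir = '/path/to/build/test_results'  # placeholder, adjust to your workspace

# Collect per-file results as {rel_path: (num_tests, num_errors, num_failures, num_skipped)}.
results = test_results2(test_results_dir, show_verbose=False, show_all=False)

# Aggregate across all result files; skipped/disabled tests are counted separately.
sum_tests, sum_errors, sum_failures, sum_skipped = aggregate_results2(results)

# Print per-file lines for unstable results plus the overall summary, including skipped counts.
print_summary2(results, show_stable=False)

# As in bin/catkin_test_results, only errors and failures produce a non-zero exit code.
sys.exit(1 if (sum_errors or sum_failures) else 0)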