diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index e8aa1982a0e..c26d071e5d6 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -79,6 +79,20 @@
 
 *
 
+**Changes**
+
+* Change ``report.outcome`` for ``xpassed`` tests to ``"passed"`` in non-strict
+  mode and ``"failed"`` in strict mode. Thanks to `@hackebrot`_ for the PR
+  (`#1795`_) and `@gprasad84`_ for the report (`#1546`_).
+
+* Tests marked with ``xfail(strict=False)`` (the default) now appear in
+  JUnitXML reports as passing tests instead of skipped.
+  Thanks to `@hackebrot`_ for the PR (`#1795`_).
+
+.. _#1795: https://github.com/pytest-dev/pytest/pull/1795
+.. _#1546: https://github.com/pytest-dev/pytest/issues/1546
+.. _@gprasad84: https://github.com/gprasad84
+
 .. _#1210: https://github.com/pytest-dev/pytest/issues/1210
 .. _#1435: https://github.com/pytest-dev/pytest/issues/1435
 .. _#1471: https://github.com/pytest-dev/pytest/issues/1471
diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 18e038d2c84..7f4d927d9e5 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
             pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
+def _is_unittest_unexpected_success_a_failure():
+    """Return True if the test suite should fail when an @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+
+    TODO: this should be moved to the "compat" module.
+    """
+    return sys.version_info >= (3, 4)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
@@ -228,9 +240,15 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # we need to translate into how pytest encodes xpass
-        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        rep.outcome = "failed"
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+        rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
@@ -245,8 +263,15 @@ def pytest_runtest_makereport(item, call):
             rep.outcome = "skipped"
             rep.wasxfail = evalxfail.getexplanation()
         elif call.when == "call":
-            rep.outcome = "failed"  # xpass outcome
-            rep.wasxfail = evalxfail.getexplanation()
+            strict_default = item.config.getini('xfail_strict')
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
@@ -260,7 +285,7 @@ def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
-        elif report.failed:
+        elif report.passed:
             return "xpassed", "X", ("XPASS", {'yellow': True})
 
 # called by the terminalreporter instance/plugin
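
Note: the following is a minimal sketch, not part of the patch, of the
user-visible semantics the ``skipping.py`` hunks above implement; test names
and reasons are illustrative. The per-mark ``strict`` argument falls back to
the ``xfail_strict`` ini option via ``item.config.getini('xfail_strict')``.

    import pytest

    @pytest.mark.xfail(reason="known bug")
    def test_xpass_non_strict():
        # Unexpected pass: report.outcome is now "passed" and the test
        # shows up as XPASS, so the suite itself still succeeds.
        assert True

    @pytest.mark.xfail(strict=True, reason="must keep failing")
    def test_xpass_strict():
        # Unexpected pass: report.outcome is "failed", with
        # longrepr "[XPASS(strict)] must keep failing".
        assert True
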
diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index d6e45384d91..249983ff5cb 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -1080,22 +1080,23 @@ def test_increment(n, expected):
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)
 
-    def test_xfail_passing_is_xpass(self, testdir):
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_passing_is_xpass(self, testdir, strict):
         s = """
             import pytest
             @pytest.mark.parametrize(("n", "expected"), [
                 (1, 2),
-                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+                pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)),
                 (3, 4),
             ])
             def test_increment(n, expected):
                 assert n + 1 == expected
-        """
+        """.format(strict=strict)
         testdir.makepyfile(s)
         reprec = testdir.inline_run()
-        # xpass is fail, obviously :)
-        reprec.assertoutcome(passed=2, failed=1)
+        passed, failed = (2, 1) if strict else (3, 0)
+        reprec.assertoutcome(passed=passed, failed=failed)
 
     def test_parametrize_called_in_generate_tests(self, testdir):
         s = """
diff --git a/testing/test_config.py b/testing/test_config.py
index 1997ddacdda..5a75f7d60ce 100644
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -592,6 +592,7 @@ def test_setuppy_fallback(self, tmpdir):
         assert inicfg == {}
 
     def test_nothing(self, tmpdir):
+        tmpdir.chdir()
         rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
         assert rootdir == tmpdir
         assert inifile is None
@@ -603,6 +604,7 @@ def test_with_specific_inifile(self, tmpdir):
         assert rootdir == tmpdir
 
     def test_with_arg_outside_cwd_without_inifile(self, tmpdir):
+        tmpdir.chdir()
         a = tmpdir.mkdir("a")
         b = tmpdir.mkdir("b")
         rootdir, inifile, inicfg = determine_setup(None, [a, b])
diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index c29381a8655..899cc5880fa 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -100,7 +100,7 @@ def test_xpass():
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
+        node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5)
 
     def test_summing_simple_with_errors(self, testdir):
         testdir.makepyfile("""
@@ -115,13 +115,16 @@ def test_fail():
             def test_error(fixture):
                 pass
             @pytest.mark.xfail
+            def test_xfail():
+                assert False
+            @pytest.mark.xfail(strict=True)
             def test_xpass():
-                assert 1
+                assert True
         """)
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(name="pytest", errors=1, failures=1, skips=1, tests=4)
+        node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5)
 
     def test_timing_function(self, testdir):
         testdir.makepyfile("""
@@ -346,16 +349,33 @@ def test_xpass():
         result, dom = runandparse(testdir)
         # assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(skips=1, tests=1)
+        node.assert_attr(skips=0, tests=1)
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(
             file="test_xfailure_xpass.py",
             line="1",
             classname="test_xfailure_xpass",
             name="test_xpass")
-        fnode = tnode.find_first_by_tag("skipped")
-        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
-        # assert "ValueError" in fnode.toxml()
+
+    def test_xfailure_xpass_strict(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="This needs to fail!")
+            def test_xpass():
+                pass
+        """)
+        result, dom = runandparse(testdir)
+        # assert result.ret
+        node = dom.find_first_by_tag("testsuite")
+        node.assert_attr(skips=0, tests=1)
+        tnode = node.find_first_by_tag("testcase")
+        tnode.assert_attr(
+            file="test_xfailure_xpass_strict.py",
+            line="1",
+            classname="test_xfailure_xpass_strict",
+            name="test_xpass")
+        fnode = tnode.find_first_by_tag("failure")
+        fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")
 
     def test_collect_error(self, testdir):
         testdir.makepyfile("syntax error")
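
Note: a quick way to observe the JUnitXML behavior asserted by the tests
above; the command, file name, and test names are illustrative, assuming a
standard ``--junitxml`` invocation.

    # Run with: pytest --junitxml=report.xml test_xpass_demo.py
    import pytest

    @pytest.mark.xfail(reason="flaky")
    def test_xpass_non_strict():
        assert True  # serialized as a plain passing <testcase>: tests=1, skips=0

    @pytest.mark.xfail(strict=True, reason="This needs to fail!")
    def test_xpass_strict():
        assert True  # emits <failure message="[XPASS(strict)] This needs to fail!">
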
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 2bfb6a8dc58..3b4bc7bd2e1 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -145,7 +145,20 @@ def test_func():
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(reason="this is an xfail")
+            def test_func():
+                assert 1
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.passed
+        assert callreport.wasxfail == "this is an xfail"
+
+    def test_xfail_xpassed_strict(self, testdir):
+        item = testdir.getitem("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="nope")
             def test_func():
                 assert 1
         """)
@@ -153,7 +166,8 @@ def test_func():
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        assert callreport.wasxfail == ""
+        assert callreport.longrepr == "[XPASS(strict)] nope"
+        assert not hasattr(callreport, "wasxfail")
 
     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
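
Note: the ``unittest`` changes below hinge on the version gate in
``_is_unittest_unexpected_success_a_failure``. A self-contained illustration
of that stdlib behavior, independent of pytest; class and method names are
made up.

    import unittest

    class Demo(unittest.TestCase):
        @unittest.expectedFailure
        def test_unexpected_success(self):
            self.assertTrue(True)  # passes despite the expectedFailure mark

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Demo)
    result = unittest.TestResult()
    suite.run(result)
    # Python >= 3.4 counts the unexpected success against the run, so
    # wasSuccessful() returns False; older versions return True.
    print(result.wasSuccessful())
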
testdir.makepyfile(""" + import unittest + class MyTestCase(unittest.TestCase): @unittest.expectedFailure - def test_func2(self): - assert 1 + def test_passing_test_is_fail(self): + assert True + if __name__ == '__main__': + unittest.main() """) - result = testdir.runpytest("-rxX") - result.stdout.fnmatch_lines([ - "*XFAIL*MyTestCase*test_func1*", - "*XPASS*MyTestCase*test_func2*", - "*1 xfailed*1 xpass*", - ]) + from _pytest.skipping import _is_unittest_unexpected_success_a_failure + should_fail = _is_unittest_unexpected_success_a_failure() + if runner == 'pytest': + result = testdir.runpytest("-rxX") + result.stdout.fnmatch_lines([ + "*MyTestCase*test_passing_test_is_fail*", + "*1 failed*" if should_fail else "*1 xpassed*", + ]) + else: + result = testdir.runpython(script) + result.stderr.fnmatch_lines([ + "*1 test in*", + "*(unexpected successes=1)*", + ]) + + assert result.ret == (1 if should_fail else 0) @pytest.mark.parametrize('fix_type, stmt', [