1""" support for skip/xfail functions and markers. """
2from __future__ import absolute_import, division, print_function
3
4from _pytest.config import hookimpl
5from _pytest.mark.evaluate import MarkEvaluator
6from _pytest.outcomes import fail, skip, xfail
7
8
9def pytest_addoption(parser):
10    group = parser.getgroup("general")
11    group.addoption(
12        "--runxfail",
13        action="store_true",
14        dest="runxfail",
15        default=False,
16        help="run tests even if they are marked xfail",
17    )
18
19    parser.addini(
20        "xfail_strict",
21        "default for the strict parameter of xfail "
22        "markers when not given explicitly (default: False)",
23        default=False,
24        type="bool",
25    )
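

# Example (illustrative): ``--runxfail`` is passed on the command line,
# while ``xfail_strict`` is usually set in an ini file:
#
#     pytest --runxfail
#
#     # pytest.ini
#     [pytest]
#     xfail_strict = true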


def pytest_configure(config):
    if config.option.runxfail:
        # Replace pytest.xfail with a no-op for the duration of the run, so
        # that imperative pytest.xfail() calls are ignored as well; the
        # original is restored during cleanup.
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "http://pytest.org/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See http://pytest.org/latest/skipping.html",
    )
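

# Example (illustrative) of the markers registered above, as they might
# appear in a user's test module:
#
#     import pytest
#
#     @pytest.mark.skip(reason="no way of currently testing this")
#     def test_unsupported():
#         ...
#
#     @pytest.mark.skipif('sys.platform == "win32"')
#     def test_posix_only():
#         ...
#
#     @pytest.mark.xfail(raises=IndexError, reason="known bug")
#     def test_known_bug():
#         ...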


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")

    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)
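

# The loop above accepts all three spellings of the skip marker, e.g.
# (illustrative):
#
#     @pytest.mark.skip(reason="keyword reason")
#     @pytest.mark.skip("positional reason")
#     @pytest.mark.skip  # reported as "unconditional skip"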


@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)


def check_xfail_no_run(item):
    """Check xfail(run=False)."""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())
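

# Example (illustrative): a test marked with run=False is reported as
# xfailed without its body ever executing:
#
#     @pytest.mark.xfail(run=False, reason="would crash the interpreter")
#     def test_crasher():
#         ...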


def check_strict_xfail(pyfuncitem):
    """Check xfail(strict=True) for the given PASSING test."""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)
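

# Example (illustrative): with strict=True an unexpectedly passing test
# fails the run as [XPASS(strict)] instead of being reported as XPASS:
#
#     @pytest.mark.xfail(strict=True, reason="not implemented yet")
#     def test_not_implemented():
#         assert 1 + 1 == 2  # passing here fails the suite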


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure

        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
            rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif getattr(item, "_skipped_by_mark", False) and rep.skipped and type(
        rep.longrepr
    ) is tuple:
        # Skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest.
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason


# Called by terminalreporter progress reporting.


def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.passed:
            return "xpassed", "X", ("XPASS", {"yellow": True})


# Called by the terminalreporter instance/plugin.


def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        return

    lines = []
    for char in tr.reportchars:
        action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None)
        action(terminalreporter, lines)

    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)


def show_simple(terminalreporter, lines, stat, fmt):
    reports = terminalreporter.stats.get(stat)
    if reports:
        for rep in reports:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            lines.append(fmt % (pos,))


def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XFAIL %s" % (pos,))
            if reason:
                lines.append("  " + str(reason))


def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        for rep in xpassed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XPASS %s %s" % (pos, reason))


def folded_skips(skipped):
    d = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        keywords = getattr(event, "keywords", {})
        # Folding reports with a global pytestmark variable. This is a
        # workaround, because for now we cannot identify the scope of a
        # skip marker.
        # TODO: revisit once the scope of marks is fixed.
        when = getattr(event, "when", None)
        if when == "setup" and "skip" in keywords and "pytestmark" not in keywords:
            key = (key[0], None, key[2])
        d.setdefault(key, []).append(event)
    values = []
    for key, events in d.items():
        values.append((len(events),) + key)
    return values
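

# Example (illustrative): reports whose ``longrepr`` share the same
# (path, lineno, reason) triple fold into one entry carrying a count,
# e.g. ``[(3, "test_spam.py", 12, "Skipped: no eggs")]``.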


def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get("skipped", [])
    if skipped:
        fskips = folded_skips(skipped)
        if fskips:
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                if lineno is not None:
                    lines.append(
                        "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason)
                    )
                else:
                    lines.append("SKIP [%d] %s: %s" % (num, fspath, reason))


def shower(stat, fmt):
    def show_(terminalreporter, lines):
        return show_simple(terminalreporter, lines, stat, fmt)

    return show_


REPORTCHAR_ACTIONS = {
    "x": show_xfailed,
    "X": show_xpassed,
    "f": shower("failed", "FAIL %s"),
    "F": shower("failed", "FAIL %s"),
    "s": show_skipped,
    "S": show_skipped,
    "p": shower("passed", "PASSED %s"),
    "E": shower("error", "ERROR %s"),
}
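

# Example (illustrative): these report characters map onto the ``-r``
# command-line option; ``pytest -rxXs`` prints the xfailed, xpassed and
# skipped summary lines built by the show_* helpers above.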