path: root/contrib/python/pytest/py3/_pytest/skipping.py
author     deshevoy <deshevoy@yandex-team.ru>  2022-02-10 16:46:57 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:57 +0300
commit     28148f76dbfcc644d96427d41c92f36cbf2fdc6e (patch)
tree       b83306b6e37edeea782e9eed673d89286c4fef35 /contrib/python/pytest/py3/_pytest/skipping.py
parent     e988f30484abe5fdeedcc7a5d3c226c01a21800c (diff)
download   ydb-28148f76dbfcc644d96427d41c92f36cbf2fdc6e.tar.gz
Restoring authorship annotation for <deshevoy@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/pytest/py3/_pytest/skipping.py')
-rw-r--r--  contrib/python/pytest/py3/_pytest/skipping.py  176
1 file changed, 88 insertions, 88 deletions
diff --git a/contrib/python/pytest/py3/_pytest/skipping.py b/contrib/python/pytest/py3/_pytest/skipping.py
index 791b0baf44..9aacfecee7 100644
--- a/contrib/python/pytest/py3/_pytest/skipping.py
+++ b/contrib/python/pytest/py3/_pytest/skipping.py
@@ -12,79 +12,79 @@ from typing import Type
import attr
from _pytest.config import Config
-from _pytest.config import hookimpl
+from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.mark.structures import Mark
from _pytest.nodes import Item
-from _pytest.outcomes import fail
-from _pytest.outcomes import skip
-from _pytest.outcomes import xfail
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.runner import CallInfo
from _pytest.store import StoreKey
-
-
+
+
def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group.addoption(
- "--runxfail",
- action="store_true",
- dest="runxfail",
- default=False,
+ group = parser.getgroup("general")
+ group.addoption(
+ "--runxfail",
+ action="store_true",
+ dest="runxfail",
+ default=False,
help="report the results of xfail tests as if they were not marked",
- )
-
- parser.addini(
- "xfail_strict",
- "default for the strict parameter of xfail "
- "markers when not given explicitly (default: False)",
- default=False,
- type="bool",
- )
-
-
+ )
+
+ parser.addini(
+ "xfail_strict",
+ "default for the strict parameter of xfail "
+ "markers when not given explicitly (default: False)",
+ default=False,
+ type="bool",
+ )
+
+
def pytest_configure(config: Config) -> None:
- if config.option.runxfail:
- # yay a hack
- import pytest
-
- old = pytest.xfail
- config._cleanup.append(lambda: setattr(pytest, "xfail", old))
-
- def nop(*args, **kwargs):
- pass
-
+ if config.option.runxfail:
+ # yay a hack
+ import pytest
+
+ old = pytest.xfail
+ config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+
+ def nop(*args, **kwargs):
+ pass
+
nop.Exception = xfail.Exception # type: ignore[attr-defined]
- setattr(pytest, "xfail", nop)
-
- config.addinivalue_line(
- "markers",
- "skip(reason=None): skip the given test function with an optional reason. "
- 'Example: skip(reason="no way of currently testing this") skips the '
- "test.",
- )
- config.addinivalue_line(
- "markers",
+ setattr(pytest, "xfail", nop)
+
+ config.addinivalue_line(
+ "markers",
+ "skip(reason=None): skip the given test function with an optional reason. "
+ 'Example: skip(reason="no way of currently testing this") skips the '
+ "test.",
+ )
+ config.addinivalue_line(
+ "markers",
"skipif(condition, ..., *, reason=...): "
"skip the given test function if any of the conditions evaluate to True. "
"Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
"See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
- )
- config.addinivalue_line(
- "markers",
+ )
+ config.addinivalue_line(
+ "markers",
"xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
"mark the test function as an expected failure if any of the conditions "
"evaluate to True. Optionally specify a reason for better reporting "
- "and run=False if you don't even want to execute the test function. "
- "If only specific exception(s) are expected, you can list them in "
- "raises, and if the test fails in other ways, it will be reported as "
+ "and run=False if you don't even want to execute the test function. "
+ "If only specific exception(s) are expected, you can list them in "
+ "raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
- )
-
-
+ )
+
+
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
"""Evaluate a single skipif/xfail condition.
-
+
If an old-style string condition is given, it is eval()'d, otherwise the
condition is bool()'d. If this fails, an appropriately formatted pytest.fail
is raised.
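
The skip/skipif/xfail markers documented in pytest_configure above are applied to test functions roughly like this (a minimal sketch; the test names, reasons, and exception choice are illustrative, not taken from this file):

import sys

import pytest


@pytest.mark.skip(reason="no way of currently testing this")
def test_skipped_unconditionally():
    ...


@pytest.mark.skipif(sys.platform == "win32", reason="does not run on win32")
def test_posix_only():
    ...


@pytest.mark.xfail(raises=NotImplementedError, reason="not implemented yet")
def test_expected_failure():
    raise NotImplementedError("pending")
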
@@ -146,24 +146,24 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
if reason is None:
if isinstance(condition, str):
reason = "condition: " + condition
- else:
+ else:
# XXX better be checked at collection time
msg = (
"Error evaluating %r: " % mark.name
+ "you need to specify reason=STRING when using booleans as conditions."
)
fail(msg, pytrace=False)
-
+
return result, reason
-
-
+
+
@attr.s(slots=True, frozen=True)
class Skip:
"""The result of evaluate_skip_marks()."""
-
+
reason = attr.ib(type=str)
-
-
+
+
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
@@ -171,18 +171,18 @@ def evaluate_skip_marks(item: Item) -> Optional[Skip]:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
-
+
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Skip(reason)
-
+
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Skip(reason)
-
+
for mark in item.iter_markers(name="skip"):
if "reason" in mark.kwargs:
reason = mark.kwargs["reason"]
@@ -249,7 +249,7 @@ def pytest_runtest_setup(item: Item) -> None:
xfail("[NOTRUN] " + xfailed.reason)
-@hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
xfailed = item._store.get(xfailed_key, None)
if xfailed is None:
@@ -268,57 +268,57 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
- outcome = yield
- rep = outcome.get_result()
+ outcome = yield
+ rep = outcome.get_result()
xfailed = item._store.get(xfailed_key, None)
# unittest special case, see setting of unexpectedsuccess_key
if unexpectedsuccess_key in item._store and rep.when == "call":
reason = item._store[unexpectedsuccess_key]
if reason:
rep.longrepr = f"Unexpected success: {reason}"
- else:
- rep.longrepr = "Unexpected success"
+ else:
+ rep.longrepr = "Unexpected success"
rep.outcome = "failed"
- elif item.config.option.runxfail:
+ elif item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
assert call.excinfo.value.msg is not None
- rep.wasxfail = "reason: " + call.excinfo.value.msg
- rep.outcome = "skipped"
+ rep.wasxfail = "reason: " + call.excinfo.value.msg
+ rep.outcome = "skipped"
elif not rep.skipped and xfailed:
- if call.excinfo:
+ if call.excinfo:
raises = xfailed.raises
if raises is not None and not isinstance(call.excinfo.value, raises):
- rep.outcome = "failed"
- else:
- rep.outcome = "skipped"
+ rep.outcome = "failed"
+ else:
+ rep.outcome = "skipped"
rep.wasxfail = xfailed.reason
- elif call.when == "call":
+ elif call.when == "call":
if xfailed.strict:
- rep.outcome = "failed"
+ rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] " + xfailed.reason
- else:
- rep.outcome = "passed"
+ else:
+ rep.outcome = "passed"
rep.wasxfail = xfailed.reason
if (
item._store.get(skipped_by_mark_key, True)
- and rep.skipped
- and type(rep.longrepr) is tuple
- ):
+ and rep.skipped
+ and type(rep.longrepr) is tuple
+ ):
# Skipped by mark.skipif; change the location of the failure
- # to point to the item definition, otherwise it will display
+ # to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest.
_, _, reason = rep.longrepr
filename, line = item.reportinfo()[:2]
assert line is not None
rep.longrepr = str(filename), line + 1, reason
-
-
+
+
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
- if hasattr(report, "wasxfail"):
- if report.skipped:
+ if hasattr(report, "wasxfail"):
+ if report.skipped:
return "xfailed", "x", "XFAIL"
- elif report.passed:
+ elif report.passed:
return "xpassed", "X", "XPASS"
return None
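
The pytest_runtest_makereport and pytest_report_teststatus hooks above are what turn xfail outcomes into the x/X short letters and XFAIL/XPASS words shown in the terminal summary, and what convert a strict xfail that unexpectedly passes into a failure with a "[XPASS(strict)]" longrepr. A minimal sketch exercising both paths (test names and reasons are illustrative, not from this file); running it with pytest -rxX reports the first test as XFAIL and the second as a failure:

import pytest


@pytest.mark.xfail(reason="known bug", strict=False)
def test_known_bug():
    assert False  # fails as expected: reported as xfailed ("x" / XFAIL)


@pytest.mark.xfail(reason="unexpectedly fixed", strict=True)
def test_unexpectedly_fixed():
    assert True  # passes despite the xfail mark: strict=True turns this into a failure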