path: root/contrib/python/pytest/py3/_pytest/skipping.py
author     arcadia-devtools <arcadia-devtools@yandex-team.ru>    2022-02-14 00:49:36 +0300
committer  arcadia-devtools <arcadia-devtools@yandex-team.ru>    2022-02-14 00:49:36 +0300
commit     82cfd1b7cab2d843cdf5467d9737f72597a493bd (patch)
tree       1dfdcfe81a1a6b193ceacc2a828c521b657a339b /contrib/python/pytest/py3/_pytest/skipping.py
parent     3df7211d3e3691f8e33b0a1fb1764fe810d59302 (diff)
download   ydb-82cfd1b7cab2d843cdf5467d9737f72597a493bd.tar.gz
intermediate changes
ref: 68b1302de4b5da30b6bdf02193f7a2604d8b5cf8
Diffstat (limited to 'contrib/python/pytest/py3/_pytest/skipping.py')
-rw-r--r--    contrib/python/pytest/py3/_pytest/skipping.py    76
1 file changed, 24 insertions(+), 52 deletions(-)
diff --git a/contrib/python/pytest/py3/_pytest/skipping.py b/contrib/python/pytest/py3/_pytest/skipping.py
index 9aacfecee7..ac7216f838 100644
--- a/contrib/python/pytest/py3/_pytest/skipping.py
+++ b/contrib/python/pytest/py3/_pytest/skipping.py
@@ -21,7 +21,7 @@ from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.runner import CallInfo
-from _pytest.store import StoreKey
+from _pytest.stash import StashKey
def pytest_addoption(parser: Parser) -> None:
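The import change above follows pytest 7's rename of the item-level Store to Stash (StoreKey becomes StashKey). As a rough sketch of how the public pytest.StashKey / item.stash API is typically used (the key and helper functions below are hypothetical, not part of this diff):

    import pytest

    # A typed key; values stored under it are ints.
    retries_key = pytest.StashKey[int]()

    def remember_retries(item: pytest.Item, n: int) -> None:
        # item.stash behaves like a dict keyed by StashKey instances.
        item.stash[retries_key] = n

    def get_retries(item: pytest.Item) -> int:
        # .get() returns the default when the key has not been set yet.
        return item.stash.get(retries_key, 0)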
@@ -49,7 +49,7 @@ def pytest_configure(config: Config) -> None:
import pytest
old = pytest.xfail
- config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+ config.add_cleanup(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
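The hunk above replaces direct use of the private config._cleanup list with the public Config.add_cleanup() API. A minimal hypothetical sketch of add_cleanup in a plugin (the resource is made up for illustration):

    import pytest

    def pytest_configure(config: pytest.Config) -> None:
        # add_cleanup registers a callback that runs when the config object
        # goes out of use (around pytest_unconfigure), replacing appends to
        # the private config._cleanup list.
        log_file = open("plugin.log", "w")  # hypothetical resource
        config.add_cleanup(log_file.close)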
@@ -68,7 +68,7 @@ def pytest_configure(config: Config) -> None:
"skipif(condition, ..., *, reason=...): "
"skip the given test function if any of the conditions evaluate to True. "
"Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
- "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
+ "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
)
config.addinivalue_line(
"markers",
@@ -78,7 +78,7 @@ def pytest_configure(config: Config) -> None:
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
- "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
+ "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
)
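The two reworded help strings above (and the updated documentation URLs) describe the skip/skipif and xfail markers. A small illustrative test module exercising them as documented (module and test names are made up):

    # test_example.py (hypothetical)
    import sys
    import pytest

    @pytest.mark.skip(reason="unconditionally skipped")
    def test_not_run():
        assert False

    @pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX platform")
    def test_posix_only():
        assert True

    @pytest.mark.xfail(raises=ZeroDivisionError, strict=True, reason="known bug")
    def test_known_bug():
        1 / 0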
@@ -157,11 +157,11 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
return result, reason
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class Skip:
"""The result of evaluate_skip_marks()."""
- reason = attr.ib(type=str)
+ reason: str = "unconditional skip"
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
@@ -184,25 +184,22 @@ def evaluate_skip_marks(item: Item) -> Optional[Skip]:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
- if "reason" in mark.kwargs:
- reason = mark.kwargs["reason"]
- elif mark.args:
- reason = mark.args[0]
- else:
- reason = "unconditional skip"
- return Skip(reason)
+ try:
+ return Skip(*mark.args, **mark.kwargs)
+ except TypeError as e:
+ raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
return None
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class Xfail:
"""The result of evaluate_xfail_marks()."""
- reason = attr.ib(type=str)
- run = attr.ib(type=bool)
- strict = attr.ib(type=bool)
- raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]])
+ reason: str
+ run: bool
+ strict: bool
+ raises: Optional[Tuple[Type[BaseException], ...]]
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
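Both result classes above move from explicit attr.ib() declarations to auto_attribs, where a plain type annotation defines the field and an assignment supplies its default. The skip handling also now forwards the marker's args/kwargs straight into the Skip constructor. A short sketch of what that buys (the example calls are illustrative):

    import attr

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class Skip:
        # The annotation alone declares the attrs field; the assignment is its default.
        reason: str = "unconditional skip"

    Skip()                       # Skip(reason='unconditional skip')
    Skip("flaky on CI")          # positional, as from @pytest.mark.skip("flaky on CI")
    Skip(reason="flaky on CI")   # keyword, as from @pytest.mark.skip(reason=...)
    # Skip("a", reason="b")      # TypeError, surfaced with the skipif hint added above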
@@ -230,30 +227,26 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
return None
-# Whether skipped due to skip or skipif marks.
-skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
-xfailed_key = StoreKey[Optional[Xfail]]()
-unexpectedsuccess_key = StoreKey[str]()
+xfailed_key = StashKey[Optional[Xfail]]()
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
skipped = evaluate_skip_marks(item)
- item._store[skipped_by_mark_key] = skipped is not None
if skipped:
- skip(skipped.reason)
+ raise skip.Exception(skipped.reason, _use_item_location=True)
- item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+ item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
- xfailed = item._store.get(xfailed_key, None)
+ xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
- item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+ item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
@@ -261,25 +254,17 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
yield
# The test run may have added an xfail mark dynamically.
- xfailed = item._store.get(xfailed_key, None)
+ xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
- item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+ item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
outcome = yield
rep = outcome.get_result()
- xfailed = item._store.get(xfailed_key, None)
- # unittest special case, see setting of unexpectedsuccess_key
- if unexpectedsuccess_key in item._store and rep.when == "call":
- reason = item._store[unexpectedsuccess_key]
- if reason:
- rep.longrepr = f"Unexpected success: {reason}"
- else:
- rep.longrepr = "Unexpected success"
- rep.outcome = "failed"
- elif item.config.option.runxfail:
+ xfailed = item.stash.get(xfailed_key, None)
+ if item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
assert call.excinfo.value.msg is not None
@@ -301,19 +286,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
rep.outcome = "passed"
rep.wasxfail = xfailed.reason
- if (
- item._store.get(skipped_by_mark_key, True)
- and rep.skipped
- and type(rep.longrepr) is tuple
- ):
- # Skipped by mark.skipif; change the location of the failure
- # to point to the item definition, otherwise it will display
- # the location of where the skip exception was raised within pytest.
- _, _, reason = rep.longrepr
- filename, line = item.reportinfo()[:2]
- assert line is not None
- rep.longrepr = str(filename), line + 1, reason
-
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if hasattr(report, "wasxfail"):
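The page truncates the diff inside pytest_report_teststatus. For context, this hook returns a (category, short letter, verbose word) triple; the following is a sketch consistent with how upstream pytest classifies xfail reports, not a reproduction of the cut-off lines:

    from typing import Optional, Tuple
    from _pytest.reports import BaseReport

    def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
        # Reports carrying a wasxfail attribute were handled by the xfail machinery.
        if hasattr(report, "wasxfail"):
            if report.skipped:
                return "xfailed", "x", "XFAIL"
            if report.passed:
                return "xpassed", "X", "XPASS"
        return None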