| author | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-02-14 00:49:36 +0300 |
|---|---|---|
| committer | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-02-14 00:49:36 +0300 |
| commit | 82cfd1b7cab2d843cdf5467d9737f72597a493bd (patch) | |
| tree | 1dfdcfe81a1a6b193ceacc2a828c521b657a339b /contrib/python/pytest/py3/_pytest | |
| parent | 3df7211d3e3691f8e33b0a1fb1764fe810d59302 (diff) | |
| download | ydb-82cfd1b7cab2d843cdf5467d9737f72597a493bd.tar.gz | |
intermediate changes
ref:68b1302de4b5da30b6bdf02193f7a2604d8b5cf8
Diffstat (limited to 'contrib/python/pytest/py3/_pytest')
58 files changed, 3891 insertions, 2411 deletions
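This commit updates the vendored pytest from 6.2.5 to 7.0.1 (see the `_pytest/_version.py` hunk below). Two API shifts recur throughout the diff: internal per-config state moves from `config._store`/`StoreKey` to the public `config.stash`/`StashKey`, and several places that previously handed out `py.path.local` objects (for example `Cache.makedir`) now return `pathlib.Path` (via the new `Cache.mkdir`). The following sketch is illustrative only, not part of the commit; the key name and cache directory name are hypothetical, and it assumes the public pytest 7.0 names (`pytest.StashKey`, `pytest.Config`, `pytest.Cache`).

```python
# Minimal sketch of plugin code written against the pytest 7.0 APIs
# introduced by this update. Names marked as hypothetical are not
# taken from the diff.
from pathlib import Path

import pytest

# StashKey/Stash replace the internal StoreKey/Store
# (config._store -> config.stash in the diff below).
my_state_key = pytest.StashKey[str]()  # hypothetical key


def pytest_configure(config: pytest.Config) -> None:
    # Plugins keep per-config state on the public Stash.
    config.stash[my_state_key] = "configured"


@pytest.fixture
def dump_dir(cache: pytest.Cache) -> Path:
    # Cache.mkdir() (added in 7.0) returns a pathlib.Path; the old
    # Cache.makedir() returned a py.path.local object.
    return cache.mkdir("my_plugin_dumps")  # hypothetical directory name
```

Code in this repository that reaches into the changed internals (for example relying on `py.path.local` return types or `config._store`) may need equivalent adjustments; the diff body follows.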
diff --git a/contrib/python/pytest/py3/_pytest/__init__.py b/contrib/python/pytest/py3/_pytest/__init__.py index 46c7827ed5..8a406c5c75 100644 --- a/contrib/python/pytest/py3/_pytest/__init__.py +++ b/contrib/python/pytest/py3/_pytest/__init__.py @@ -1,8 +1,9 @@ -__all__ = ["__version__"] +__all__ = ["__version__", "version_tuple"] try: - from ._version import version as __version__ -except ImportError: + from ._version import version as __version__, version_tuple +except ImportError: # pragma: no cover # broken installation, we don't even try # unknown only works because we do poor mans version compare __version__ = "unknown" + version_tuple = (0, 0, "unknown") # type:ignore[assignment] diff --git a/contrib/python/pytest/py3/_pytest/_code/code.py b/contrib/python/pytest/py3/_pytest/_code/code.py index 423069330a..5b758a8848 100644 --- a/contrib/python/pytest/py3/_pytest/_code/code.py +++ b/contrib/python/pytest/py3/_pytest/_code/code.py @@ -1,4 +1,6 @@ +import ast import inspect +import os import re import sys import traceback @@ -12,6 +14,7 @@ from types import FrameType from types import TracebackType from typing import Any from typing import Callable +from typing import ClassVar from typing import Dict from typing import Generic from typing import Iterable @@ -31,7 +34,6 @@ from weakref import ref import attr import pluggy -import py import _pytest from _pytest._code.source import findsource @@ -43,9 +45,13 @@ from _pytest._io.saferepr import safeformat from _pytest._io.saferepr import saferepr from _pytest.compat import final from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath if TYPE_CHECKING: from typing_extensions import Literal + from typing_extensions import SupportsIndex from weakref import ReferenceType _TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] @@ -78,16 +84,16 @@ class Code: return self.raw.co_name @property - def path(self) -> Union[py.path.local, str]: + def path(self) -> Union[Path, str]: """Return a path object pointing to source code, or an ``str`` in case of ``OSError`` / non-existing file.""" if not self.raw.co_filename: return "" try: - p = py.path.local(self.raw.co_filename) + p = absolutepath(self.raw.co_filename) # maybe don't try this checking - if not p.check(): - raise OSError("py.path check failed.") + if not p.exists(): + raise OSError("path check failed.") return p except OSError: # XXX maybe try harder like the weird logic @@ -223,7 +229,7 @@ class TracebackEntry: return source.getstatement(self.lineno) @property - def path(self) -> Union[py.path.local, str]: + def path(self) -> Union[Path, str]: """Path to the source code.""" return self.frame.code.path @@ -235,7 +241,9 @@ class TracebackEntry: def getfirstlinesource(self) -> int: return self.frame.code.firstlineno - def getsource(self, astcache=None) -> Optional["Source"]: + def getsource( + self, astcache: Optional[Dict[Union[str, Path], ast.AST]] = None + ) -> Optional["Source"]: """Return failing source code.""" # we use the passed in astcache to not reparse asttrees # within exception info printing @@ -255,7 +263,7 @@ class TracebackEntry: except SyntaxError: end = self.lineno + 1 else: - if key is not None: + if key is not None and astcache is not None: astcache[key] = astnode return source[start:end] @@ -270,9 +278,9 @@ class TracebackEntry: Mostly for internal use. 
""" - tbh: Union[bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]] = ( - False - ) + tbh: Union[ + bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool] + ] = False for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): # in normal cases, f_locals and f_globals are dictionaries # however via `exec(...)` / `eval(...)` they can be other types @@ -336,10 +344,10 @@ class Traceback(List[TracebackEntry]): def cut( self, - path=None, + path: Optional[Union["os.PathLike[str]", str]] = None, lineno: Optional[int] = None, firstlineno: Optional[int] = None, - excludepath: Optional[py.path.local] = None, + excludepath: Optional["os.PathLike[str]"] = None, ) -> "Traceback": """Return a Traceback instance wrapping part of this Traceback. @@ -350,31 +358,37 @@ class Traceback(List[TracebackEntry]): for formatting reasons (removing some uninteresting bits that deal with handling of the exception/traceback). """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) for x in self: code = x.frame.code codepath = code.path + if path is not None and str(codepath) != path_: + continue if ( - (path is None or codepath == path) - and ( - excludepath is None - or not isinstance(codepath, py.path.local) - or not codepath.relto(excludepath) - ) - and (lineno is None or x.lineno == lineno) - and (firstlineno is None or x.frame.code.firstlineno == firstlineno) + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] ): - return Traceback(x._rawentry, self._excinfo) + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry, self._excinfo) return self @overload - def __getitem__(self, key: int) -> TracebackEntry: + def __getitem__(self, key: "SupportsIndex") -> TracebackEntry: ... @overload def __getitem__(self, key: slice) -> "Traceback": ... 
- def __getitem__(self, key: Union[int, slice]) -> Union[TracebackEntry, "Traceback"]: + def __getitem__( + self, key: Union["SupportsIndex", slice] + ) -> Union[TracebackEntry, "Traceback"]: if isinstance(key, slice): return self.__class__(super().__getitem__(key)) else: @@ -418,41 +432,45 @@ class Traceback(List[TracebackEntry]): f = entry.frame loc = f.f_locals for otherloc in values: - if f.eval( - co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc, - ): + if otherloc == loc: return i values.append(entry.frame.f_locals) return None -co_equal = compile( - "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" -) - - -_E = TypeVar("_E", bound=BaseException, covariant=True) +E = TypeVar("E", bound=BaseException, covariant=True) @final -@attr.s(repr=False) -class ExceptionInfo(Generic[_E]): +@attr.s(repr=False, init=False, auto_attribs=True) +class ExceptionInfo(Generic[E]): """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" - _assert_start_repr = "AssertionError('assert " + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: Optional[Tuple[Type["E"], "E", TracebackType]] + _striptext: str + _traceback: Optional[Traceback] - _excinfo = attr.ib(type=Optional[Tuple[Type["_E"], "_E", TracebackType]]) - _striptext = attr.ib(type=str, default="") - _traceback = attr.ib(type=Optional[Traceback], default=None) + def __init__( + self, + excinfo: Optional[Tuple[Type["E"], "E", TracebackType]], + striptext: str = "", + traceback: Optional[Traceback] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback @classmethod def from_exc_info( cls, - exc_info: Tuple[Type[_E], _E, TracebackType], + exc_info: Tuple[Type[E], E, TracebackType], exprinfo: Optional[str] = None, - ) -> "ExceptionInfo[_E]": + ) -> "ExceptionInfo[E]": """Return an ExceptionInfo for an existing exc_info tuple. .. warning:: @@ -472,7 +490,7 @@ class ExceptionInfo(Generic[_E]): if exprinfo and exprinfo.startswith(cls._assert_start_repr): _striptext = "AssertionError: " - return cls(exc_info, _striptext) + return cls(exc_info, _striptext, _ispytest=True) @classmethod def from_current( @@ -497,17 +515,17 @@ class ExceptionInfo(Generic[_E]): return ExceptionInfo.from_exc_info(exc_info, exprinfo) @classmethod - def for_later(cls) -> "ExceptionInfo[_E]": + def for_later(cls) -> "ExceptionInfo[E]": """Return an unfilled ExceptionInfo.""" - return cls(None) + return cls(None, _ispytest=True) - def fill_unfilled(self, exc_info: Tuple[Type[_E], _E, TracebackType]) -> None: + def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None: """Fill an unfilled ExceptionInfo created with ``for_later()``.""" assert self._excinfo is None, "ExceptionInfo was already filled" self._excinfo = exc_info @property - def type(self) -> Type[_E]: + def type(self) -> Type[E]: """The exception class.""" assert ( self._excinfo is not None @@ -515,7 +533,7 @@ class ExceptionInfo(Generic[_E]): return self._excinfo[0] @property - def value(self) -> _E: + def value(self) -> E: """The exception value.""" assert ( self._excinfo is not None @@ -559,10 +577,10 @@ class ExceptionInfo(Generic[_E]): def exconly(self, tryshort: bool = False) -> str: """Return the exception as a string. 
- When 'tryshort' resolves to True, and the exception is a - _pytest._code._AssertionError, only the actual exception part of - the exception representation is returned (so 'AssertionError: ' is - removed from the beginning). + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning). """ lines = format_exception_only(self.type, self.value) text = "".join(lines) @@ -662,22 +680,24 @@ class ExceptionInfo(Generic[_E]): return True -@attr.s +@attr.s(auto_attribs=True) class FormattedExcinfo: """Presenting information about failing Functions and Generators.""" # for traceback entries - flow_marker = ">" - fail_marker = "E" - - showlocals = attr.ib(type=bool, default=False) - style = attr.ib(type="_TracebackStyle", default="long") - abspath = attr.ib(type=bool, default=True) - tbfilter = attr.ib(type=bool, default=True) - funcargs = attr.ib(type=bool, default=False) - truncate_locals = attr.ib(type=bool, default=True) - chain = attr.ib(type=bool, default=True) - astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: "_TracebackStyle" = "long" + abspath: bool = True + tbfilter: bool = True + funcargs: bool = False + truncate_locals: bool = True + chain: bool = True + astcache: Dict[Union[str, Path], ast.AST] = attr.ib( + factory=dict, init=False, repr=False + ) def _getindent(self, source: "Source") -> int: # Figure out indent for the given source. @@ -801,7 +821,8 @@ class FormattedExcinfo: message = "in %s" % (entry.name) else: message = excinfo and excinfo.typename or "" - path = self._makepath(entry.path) + entry_path = entry.path + path = self._makepath(entry_path) reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) localsrepr = self.repr_locals(entry.locals) return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) @@ -814,15 +835,15 @@ class FormattedExcinfo: lines.extend(self.get_exconly(excinfo, indent=4)) return ReprEntry(lines, None, None, None, style) - def _makepath(self, path): - if not self.abspath: + def _makepath(self, path: Union[Path, str]) -> str: + if not self.abspath and isinstance(path, Path): try: - np = py.path.local().bestrelpath(path) + np = bestrelpath(Path.cwd(), path) except OSError: - return path + return str(path) if len(np) < len(str(path)): - path = np - return path + return np + return str(path) def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback": traceback = excinfo.traceback @@ -877,7 +898,7 @@ class FormattedExcinfo: max_frames=max_frames, total=len(traceback), ) - # Type ignored because adding two instaces of a List subtype + # Type ignored because adding two instances of a List subtype # currently incorrectly has type List instead of the subtype. 
traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore else: @@ -918,7 +939,7 @@ class FormattedExcinfo: if e.__cause__ is not None and self.chain: e = e.__cause__ excinfo_ = ( - ExceptionInfo((type(e), e, e.__traceback__)) + ExceptionInfo.from_exc_info((type(e), e, e.__traceback__)) if e.__traceback__ else None ) @@ -928,7 +949,7 @@ class FormattedExcinfo: ): e = e.__context__ excinfo_ = ( - ExceptionInfo((type(e), e, e.__traceback__)) + ExceptionInfo.from_exc_info((type(e), e, e.__traceback__)) if e.__traceback__ else None ) @@ -939,7 +960,7 @@ class FormattedExcinfo: return ExceptionChainRepr(repr_chain) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class TerminalRepr: def __str__(self) -> str: # FYI this is called from pytest-xdist's serialization of exception @@ -950,7 +971,7 @@ class TerminalRepr: return io.getvalue().strip() def __repr__(self) -> str: - return "<{} instance at {:0x}>".format(self.__class__, id(self)) + return f"<{self.__class__} instance at {id(self):0x}>" def toterminal(self, tw: TerminalWriter) -> None: raise NotImplementedError() @@ -975,13 +996,9 @@ class ExceptionRepr(TerminalRepr): tw.line(content) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ExceptionChainRepr(ExceptionRepr): - chain = attr.ib( - type=Sequence[ - Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]] - ] - ) + chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]] def __attrs_post_init__(self) -> None: super().__attrs_post_init__() @@ -999,23 +1016,23 @@ class ExceptionChainRepr(ExceptionRepr): super().toterminal(tw) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprExceptionInfo(ExceptionRepr): - reprtraceback = attr.ib(type="ReprTraceback") - reprcrash = attr.ib(type="ReprFileLocation") + reprtraceback: "ReprTraceback" + reprcrash: "ReprFileLocation" def toterminal(self, tw: TerminalWriter) -> None: self.reprtraceback.toterminal(tw) super().toterminal(tw) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprTraceback(TerminalRepr): - reprentries = attr.ib(type=Sequence[Union["ReprEntry", "ReprEntryNative"]]) - extraline = attr.ib(type=Optional[str]) - style = attr.ib(type="_TracebackStyle") + reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]] + extraline: Optional[str] + style: "_TracebackStyle" - entrysep = "_ " + entrysep: ClassVar = "_ " def toterminal(self, tw: TerminalWriter) -> None: # The entries might have different styles. 
@@ -1043,22 +1060,23 @@ class ReprTracebackNative(ReprTraceback): self.extraline = None -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprEntryNative(TerminalRepr): - lines = attr.ib(type=Sequence[str]) - style: "_TracebackStyle" = "native" + lines: Sequence[str] + + style: ClassVar["_TracebackStyle"] = "native" def toterminal(self, tw: TerminalWriter) -> None: tw.write("".join(self.lines)) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprEntry(TerminalRepr): - lines = attr.ib(type=Sequence[str]) - reprfuncargs = attr.ib(type=Optional["ReprFuncArgs"]) - reprlocals = attr.ib(type=Optional["ReprLocals"]) - reprfileloc = attr.ib(type=Optional["ReprFileLocation"]) - style = attr.ib(type="_TracebackStyle") + lines: Sequence[str] + reprfuncargs: Optional["ReprFuncArgs"] + reprlocals: Optional["ReprLocals"] + reprfileloc: Optional["ReprFileLocation"] + style: "_TracebackStyle" def _write_entry_lines(self, tw: TerminalWriter) -> None: """Write the source code portions of a list of traceback entries with syntax highlighting. @@ -1132,11 +1150,11 @@ class ReprEntry(TerminalRepr): ) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprFileLocation(TerminalRepr): - path = attr.ib(type=str, converter=str) - lineno = attr.ib(type=int) - message = attr.ib(type=str) + path: str = attr.ib(converter=str) + lineno: int + message: str def toterminal(self, tw: TerminalWriter) -> None: # Filename and lineno output for each entry, using an output format @@ -1149,18 +1167,18 @@ class ReprFileLocation(TerminalRepr): tw.line(f":{self.lineno}: {msg}") -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprLocals(TerminalRepr): - lines = attr.ib(type=Sequence[str]) + lines: Sequence[str] def toterminal(self, tw: TerminalWriter, indent="") -> None: for line in self.lines: tw.line(indent + line) -@attr.s(eq=False) +@attr.s(eq=False, auto_attribs=True) class ReprFuncArgs(TerminalRepr): - args = attr.ib(type=Sequence[Tuple[str, object]]) + args: Sequence[Tuple[str, object]] def toterminal(self, tw: TerminalWriter) -> None: if self.args: @@ -1181,7 +1199,7 @@ class ReprFuncArgs(TerminalRepr): tw.line("") -def getfslineno(obj: object) -> Tuple[Union[str, py.path.local], int]: +def getfslineno(obj: object) -> Tuple[Union[str, Path], int]: """Return source location (path, lineno) for the given object. If the source cannot be determined return ("", -1). 
@@ -1203,7 +1221,7 @@ def getfslineno(obj: object) -> Tuple[Union[str, py.path.local], int]: except TypeError: return "", -1 - fspath = fn and py.path.local(fn) or "" + fspath = fn and absolutepath(fn) or "" lineno = -1 if fspath: try: @@ -1225,7 +1243,6 @@ _PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) if _PLUGGY_DIR.name == "__init__.py": _PLUGGY_DIR = _PLUGGY_DIR.parent _PYTEST_DIR = Path(_pytest.__file__).parent -_PY_DIR = Path(py.__file__).parent def filter_traceback(entry: TracebackEntry) -> bool: @@ -1253,7 +1270,5 @@ def filter_traceback(entry: TracebackEntry) -> bool: return False if _PYTEST_DIR in parents: return False - if _PY_DIR in parents: - return False return True diff --git a/contrib/python/pytest/py3/_pytest/_code/source.py b/contrib/python/pytest/py3/_pytest/_code/source.py index 6f54057c0a..208cfb8003 100644 --- a/contrib/python/pytest/py3/_pytest/_code/source.py +++ b/contrib/python/pytest/py3/_pytest/_code/source.py @@ -149,6 +149,11 @@ def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[i values: List[int] = [] for x in ast.walk(node): if isinstance(x, (ast.stmt, ast.ExceptHandler)): + # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator. + # Since Python 3.8, the lineno points to the class/def, so need to include the decorators. + if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + for d in x.decorator_list: + values.append(d.lineno - 1) values.append(x.lineno - 1) for name in ("finalbody", "orelse"): val: Optional[List[ast.stmt]] = getattr(x, name, None) diff --git a/contrib/python/pytest/py3/_pytest/_io/saferepr.py b/contrib/python/pytest/py3/_pytest/_io/saferepr.py index 5eb1e08890..e7ff5cab20 100644 --- a/contrib/python/pytest/py3/_pytest/_io/saferepr.py +++ b/contrib/python/pytest/py3/_pytest/_io/saferepr.py @@ -12,7 +12,7 @@ def _try_repr_or_str(obj: object) -> str: except (KeyboardInterrupt, SystemExit): raise except BaseException: - return '{}("{}")'.format(type(obj).__name__, obj) + return f'{type(obj).__name__}("{obj}")' def _format_repr_exception(exc: BaseException, obj: object) -> str: @@ -21,7 +21,7 @@ def _format_repr_exception(exc: BaseException, obj: object) -> str: except (KeyboardInterrupt, SystemExit): raise except BaseException as exc: - exc_info = "unpresentable exception ({})".format(_try_repr_or_str(exc)) + exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})" return "<[{} raised in repr()] {} object at 0x{:x}>".format( exc_info, type(obj).__name__, id(obj) ) @@ -36,12 +36,23 @@ def _ellipsize(s: str, maxsize: int) -> str: class SafeRepr(reprlib.Repr): - """repr.Repr that limits the resulting size of repr() and includes - information on exceptions raised during the call.""" + """ + repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call. + """ - def __init__(self, maxsize: int) -> None: + def __init__(self, maxsize: Optional[int]) -> None: + """ + :param maxsize: + If not None, will truncate the resulting repr to that specific size, using ellipsis + somewhere in the middle to hide the extra text. + If None, will not impose any size limits on the returning repr. + """ super().__init__() - self.maxstring = maxsize + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. 
+ self.maxstring = maxsize if maxsize is not None else 1_000_000_000 self.maxsize = maxsize def repr(self, x: object) -> str: @@ -51,7 +62,9 @@ class SafeRepr(reprlib.Repr): raise except BaseException as exc: s = _format_repr_exception(exc, x) - return _ellipsize(s, self.maxsize) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s def repr_instance(self, x: object, level: int) -> str: try: @@ -60,7 +73,9 @@ class SafeRepr(reprlib.Repr): raise except BaseException as exc: s = _format_repr_exception(exc, x) - return _ellipsize(s, self.maxsize) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s def safeformat(obj: object) -> str: @@ -75,7 +90,11 @@ def safeformat(obj: object) -> str: return _format_repr_exception(exc, obj) -def saferepr(obj: object, maxsize: int = 240) -> str: +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr(obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE) -> str: """Return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented @@ -83,7 +102,7 @@ def saferepr(obj: object, maxsize: int = 240) -> str: care to never raise exceptions itself. This function is a wrapper around the Repr/reprlib functionality of the - standard 2.6 lib. + stdlib. """ return SafeRepr(maxsize).repr(obj) @@ -107,7 +126,12 @@ class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter): if objid in context or p is None: # Type ignored because _format is private. super()._format( # type: ignore[misc] - object, stream, indent, allowance, context, level, + object, + stream, + indent, + allowance, + context, + level, ) return diff --git a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py b/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py index 8edf4cd75f..379035d858 100644 --- a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py +++ b/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py @@ -195,16 +195,39 @@ class TerminalWriter: def _highlight(self, source: str) -> str: """Highlight the given source code if we have markup support.""" + from _pytest.config.exceptions import UsageError + if not self.hasmarkup or not self.code_highlight: return source try: from pygments.formatters.terminal import TerminalFormatter from pygments.lexers.python import PythonLexer from pygments import highlight + import pygments.util except ImportError: return source else: - highlighted: str = highlight( - source, PythonLexer(), TerminalFormatter(bg="dark") - ) - return highlighted + try: + highlighted: str = highlight( + source, + PythonLexer(), + TerminalFormatter( + bg=os.getenv("PYTEST_THEME_MODE", "dark"), + style=os.getenv("PYTEST_THEME"), + ), + ) + return highlighted + except pygments.util.ClassNotFound: + raise UsageError( + "PYTEST_THEME environment variable had an invalid value: '{}'. " + "Only valid pygment styles are allowed.".format( + os.getenv("PYTEST_THEME") + ) + ) + except pygments.util.OptionError: + raise UsageError( + "PYTEST_THEME_MODE environment variable had an invalid value: '{}'. 
" + "The only allowed values are 'dark' and 'light'.".format( + os.getenv("PYTEST_THEME_MODE") + ) + ) diff --git a/contrib/python/pytest/py3/_pytest/_version.py b/contrib/python/pytest/py3/_pytest/_version.py index 83518587e4..5515abadad 100644 --- a/contrib/python/pytest/py3/_pytest/_version.py +++ b/contrib/python/pytest/py3/_pytest/_version.py @@ -1,5 +1,5 @@ # coding: utf-8 # file generated by setuptools_scm # don't change, don't track in version control -version = '6.2.5' -version_tuple = (6, 2, 5) +version = '7.0.1' +version_tuple = (7, 0, 1) diff --git a/contrib/python/pytest/py3/_pytest/assertion/__init__.py b/contrib/python/pytest/py3/_pytest/assertion/__init__.py index a18cf198df..480a26ad86 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/__init__.py +++ b/contrib/python/pytest/py3/_pytest/assertion/__init__.py @@ -88,13 +88,13 @@ class AssertionState: def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: """Try to install the rewrite hook, raise SystemError if it fails.""" - config._store[assertstate_key] = AssertionState(config, "rewrite") - config._store[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + config.stash[assertstate_key] = AssertionState(config, "rewrite") + config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) sys.meta_path.insert(0, hook) - config._store[assertstate_key].trace("installed rewrite import hook") + config.stash[assertstate_key].trace("installed rewrite import hook") def undo() -> None: - hook = config._store[assertstate_key].hook + hook = config.stash[assertstate_key].hook if hook is not None and hook in sys.meta_path: sys.meta_path.remove(hook) @@ -104,9 +104,9 @@ def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: def pytest_collection(session: "Session") -> None: # This hook is only called when test modules are collected - # so for example not in the master process of pytest-xdist + # so for example not in the managing process of pytest-xdist # (which does not collect test modules). 
- assertstate = session.config._store.get(assertstate_key, None) + assertstate = session.config.stash.get(assertstate_key, None) if assertstate: if assertstate.hook is not None: assertstate.hook.set_session(session) @@ -153,6 +153,7 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: saved_assert_hooks = util._reprcompare, util._assertion_pass util._reprcompare = callbinrepr + util._config = item.config if ihook.pytest_assertion_pass.get_hookimpls(): @@ -164,10 +165,11 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: yield util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None def pytest_sessionfinish(session: "Session") -> None: - assertstate = session.config._store.get(assertstate_key, None) + assertstate = session.config.stash.get(assertstate_key, None) if assertstate: if assertstate.hook is not None: assertstate.hook.set_session(None) diff --git a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py b/contrib/python/pytest/py3/_pytest/assertion/rewrite.py index 37ff076aab..88ac6cab36 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py +++ b/contrib/python/pytest/py3/_pytest/assertion/rewrite.py @@ -19,6 +19,7 @@ from typing import Callable from typing import Dict from typing import IO from typing import Iterable +from typing import Iterator from typing import List from typing import Optional from typing import Sequence @@ -27,8 +28,7 @@ from typing import Tuple from typing import TYPE_CHECKING from typing import Union -import py - +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE from _pytest._io.saferepr import saferepr from _pytest._version import version from _pytest.assertion import util @@ -37,14 +37,15 @@ from _pytest.assertion.util import ( # noqa: F401 ) from _pytest.config import Config from _pytest.main import Session +from _pytest.pathlib import absolutepath from _pytest.pathlib import fnmatch_ex -from _pytest.store import StoreKey +from _pytest.stash import StashKey if TYPE_CHECKING: from _pytest.assertion import AssertionState -assertstate_key = StoreKey["AssertionState"]() +assertstate_key = StashKey["AssertionState"]() # pytest caches rewritten pycs in pycache dirs @@ -63,7 +64,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader) except ValueError: self.fnpats = ["test_*.py", "*_test.py"] self.session: Optional[Session] = None - self._rewritten_names: Set[str] = set() + self._rewritten_names: Dict[str, Path] = {} self._must_rewrite: Set[str] = set() # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, # which might result in infinite recursion (#3506) @@ -87,7 +88,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader) ) -> Optional[importlib.machinery.ModuleSpec]: if self._writing_pyc: return None - state = self.config._store[assertstate_key] + state = self.config.stash[assertstate_key] if self._early_rewrite_bailout(name, state): return None state.trace("find_module called for: %s" % name) @@ -131,9 +132,9 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader) assert module.__spec__ is not None assert module.__spec__.origin is not None fn = Path(module.__spec__.origin) - state = self.config._store[assertstate_key] + state = self.config.stash[assertstate_key] - self._rewritten_names.add(module.__name__) + self._rewritten_names[module.__name__] = fn # The requested module looks like a test file, so rewrite it. 
This is # the most magical part of the process: load the source, rewrite the @@ -215,7 +216,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader) return True if self.session is not None: - if self.session.isinitpath(py.path.local(fn)): + if self.session.isinitpath(absolutepath(fn)): state.trace(f"matched test file (was specified on cmdline): {fn!r}") return True @@ -275,6 +276,16 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader) with open(pathname, "rb") as f: return f.read() + if sys.version_info >= (3, 10): + + def get_resource_reader(self, name: str) -> importlib.abc.TraversableResources: # type: ignore + if sys.version_info < (3, 11): + from importlib.readers import FileReader + else: + from importlib.resources.readers import FileReader + + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) + def _write_pyc_fp( fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType @@ -333,7 +344,7 @@ else: try: _write_pyc_fp(fp, source_stat, co) - os.rename(proc_pyc, os.fspath(pyc)) + os.rename(proc_pyc, pyc) except OSError as e: state.trace(f"error writing pyc file at {pyc}: {e}") # we ignore any failure to write the cache file @@ -347,13 +358,12 @@ else: def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]: """Read and rewrite *fn* and return the code object.""" - fn_ = os.fspath(fn) - stat = os.stat(fn_) - with open(fn_, "rb") as f: - source = f.read() - tree = ast.parse(source, filename=fn_) - rewrite_asserts(tree, source, fn_, config) - co = compile(tree, fn_, "exec", dont_inherit=True) + stat = os.stat(fn) + source = fn.read_bytes() + strfn = str(fn) + tree = ast.parse(source, filename=strfn) + rewrite_asserts(tree, source, strfn, config) + co = compile(tree, strfn, "exec", dont_inherit=True) return stat, co @@ -365,14 +375,14 @@ def _read_pyc( Return rewritten code if successful or None if not. """ try: - fp = open(os.fspath(pyc), "rb") + fp = open(pyc, "rb") except OSError: return None with fp: # https://www.python.org/dev/peps/pep-0552/ has_flags = sys.version_info >= (3, 7) try: - stat_result = os.stat(os.fspath(source)) + stat_result = os.stat(source) mtime = int(stat_result.st_mtime) size = stat_result.st_size data = fp.read(16 if has_flags else 12) @@ -428,7 +438,18 @@ def _saferepr(obj: object) -> str: sequences, especially '\n{' and '\n}' are likely to be present in JSON reprs. 
""" - return saferepr(obj).replace("\n", "\\n") + maxsize = _get_maxsize_for_saferepr(util._config) + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]: + """Get `maxsize` configuration for saferepr based on the given config object.""" + verbosity = config.getoption("verbose") if config is not None else 0 + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE def _format_assertmsg(obj: object) -> str: @@ -495,7 +516,7 @@ def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: def _check_if_assertion_pass_impl() -> bool: """Check if any plugins implement the pytest_assertion_pass hook - in order not to generate explanation unecessarily (might be expensive).""" + in order not to generate explanation unnecessarily (might be expensive).""" return True if util._assertion_pass else False @@ -528,21 +549,14 @@ BINOP_MAP = { } -def set_location(node, lineno, col_offset): - """Set node location information recursively.""" - - def _fix(node, lineno, col_offset): - if "lineno" in node._attributes: - node.lineno = lineno - if "col_offset" in node._attributes: - node.col_offset = col_offset - for child in ast.iter_child_nodes(node): - _fix(child, lineno, col_offset) - - _fix(node, lineno, col_offset) - return node +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) +@functools.lru_cache(maxsize=1) def _get_assertion_exprs(src: bytes) -> Dict[int, str]: """Return a mapping from {lineno: "assertion test expression"}.""" ret: Dict[int, str] = {} @@ -664,10 +678,6 @@ class AssertionRewriter(ast.NodeVisitor): self.enable_assertion_pass_hook = False self.source = source - @functools.lru_cache(maxsize=1) - def _assert_expr_to_lineno(self) -> Dict[int, str]: - return _get_assertion_exprs(self.source) - def run(self, mod: ast.Module) -> None: """Find all assert statements in *mod* and rewrite them.""" if not mod.body: @@ -854,7 +864,7 @@ class AssertionRewriter(ast.NodeVisitor): "assertion is always true, perhaps remove parentheses?" ), category=None, - filename=os.fspath(self.module_path), + filename=self.module_path, lineno=assert_.lineno, ) @@ -895,7 +905,7 @@ class AssertionRewriter(ast.NodeVisitor): # Passed fmt_pass = self.helper("_format_explanation", msg) - orig = self._assert_expr_to_lineno()[assert_.lineno] + orig = _get_assertion_exprs(self.source)[assert_.lineno] hook_call_pass = ast.Expr( self.helper( "_call_assertion_pass", @@ -946,9 +956,10 @@ class AssertionRewriter(ast.NodeVisitor): variables = [ast.Name(name, ast.Store()) for name in self.variables] clear = ast.Assign(variables, ast.NameConstant(None)) self.statements.append(clear) - # Fix line numbers. + # Fix locations (line numbers/column offsets). for stmt in self.statements: - set_location(stmt, assert_.lineno, assert_.col_offset) + for node in traverse_node(stmt): + ast.copy_location(node, assert_) return self.statements def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]: @@ -1095,7 +1106,7 @@ def try_makedirs(cache_dir: Path) -> bool: Returns True if successful or if it already exists. 
""" try: - os.makedirs(os.fspath(cache_dir), exist_ok=True) + os.makedirs(cache_dir, exist_ok=True) except (FileNotFoundError, NotADirectoryError, FileExistsError): # One of the path components was not a directory: # - we're in a zip file diff --git a/contrib/python/pytest/py3/_pytest/assertion/truncate.py b/contrib/python/pytest/py3/_pytest/assertion/truncate.py index 5ba9ddca75..ce148dca09 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/truncate.py +++ b/contrib/python/pytest/py3/_pytest/assertion/truncate.py @@ -3,10 +3,10 @@ Current default behaviour is to truncate assertion explanations at ~8 terminal lines, unless running in "-vv" mode or running on CI. """ -import os from typing import List from typing import Optional +from _pytest.assertion import util from _pytest.nodes import Item @@ -27,13 +27,7 @@ def truncate_if_required( def _should_truncate_item(item: Item) -> bool: """Whether or not this test item is eligible for truncation.""" verbose = item.config.option.verbose - return verbose < 2 and not _running_on_ci() - - -def _running_on_ci() -> bool: - """Check if we're currently running on a CI system.""" - env_vars = ["CI", "BUILD_NUMBER"] - return any(var in os.environ for var in env_vars) + return verbose < 2 and not util.running_on_ci() def _truncate_explanation( diff --git a/contrib/python/pytest/py3/_pytest/assertion/util.py b/contrib/python/pytest/py3/_pytest/assertion/util.py index da1ffd15e3..19f1089c20 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/util.py +++ b/contrib/python/pytest/py3/_pytest/assertion/util.py @@ -1,5 +1,6 @@ """Utilities for assertion debugging.""" import collections.abc +import os import pprint from typing import AbstractSet from typing import Any @@ -15,6 +16,7 @@ from _pytest import outcomes from _pytest._io.saferepr import _pformat_dispatch from _pytest._io.saferepr import safeformat from _pytest._io.saferepr import saferepr +from _pytest.config import Config # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -26,6 +28,9 @@ _reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None # when pytest_runtest_setup is called. _assertion_pass: Optional[Callable[[int, str, str], None]] = None +# Config object which is assigned during pytest_runtest_protocol. +_config: Optional[Config] = None + def format_explanation(explanation: str) -> str: r"""Format an explanation. 
@@ -175,7 +180,15 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: if istext(left) and istext(right): explanation = _diff_text(left, right, verbose) else: - if type(left) == type(right) and ( + from _pytest.python_api import ApproxBase + + if isinstance(left, ApproxBase) or isinstance(right, ApproxBase): + # Although the common order should be obtained == expected, this ensures both ways + approx_side = left if isinstance(left, ApproxBase) else right + other_side = right if isinstance(left, ApproxBase) else left + + explanation = approx_side._repr_compare(other_side) + elif type(left) == type(right) and ( isdatacls(left) or isattrs(left) or isnamedtuple(left) ): # Note: unlike dataclasses/attrs, namedtuples compare only the @@ -191,9 +204,11 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: explanation = _compare_eq_dict(left, right, verbose) elif verbose > 0: explanation = _compare_eq_verbose(left, right) + if isiterable(left) and isiterable(right): expl = _compare_eq_iterable(left, right, verbose) explanation.extend(expl) + return explanation @@ -272,7 +287,7 @@ def _surrounding_parens_on_own_lines(lines: List[str]) -> None: def _compare_eq_iterable( left: Iterable[Any], right: Iterable[Any], verbose: int = 0 ) -> List[str]: - if not verbose: + if not verbose and not running_on_ci(): return ["Use -v to get the full diff"] # dynamic import to speedup pytest import difflib @@ -475,3 +490,9 @@ def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]: else: newdiff.append(line) return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/contrib/python/pytest/py3/_pytest/cacheprovider.py b/contrib/python/pytest/py3/_pytest/cacheprovider.py index 03acd03109..681d02b409 100644 --- a/contrib/python/pytest/py3/_pytest/cacheprovider.py +++ b/contrib/python/pytest/py3/_pytest/cacheprovider.py @@ -13,7 +13,6 @@ from typing import Set from typing import Union import attr -import py from .pathlib import resolve_from_str from .pathlib import rm_rf @@ -42,27 +41,27 @@ which provides the `--lf` and `--ff` options, as well as the `cache` fixture. **Do not** commit this to version control. -See [the docs](https://docs.pytest.org/en/stable/cache.html) for more information. +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. """ CACHEDIR_TAG_CONTENT = b"""\ Signature: 8a477f597d28d172789f06886806bc55 # This file is a cache directory tag created by pytest. # For information about cache directory tags, see: -# http://www.bford.info/cachedir/spec.html +# https://bford.info/cachedir/spec.html """ @final -@attr.s(init=False) +@attr.s(init=False, auto_attribs=True) class Cache: - _cachedir = attr.ib(type=Path, repr=False) - _config = attr.ib(type=Config, repr=False) + _cachedir: Path = attr.ib(repr=False) + _config: Config = attr.ib(repr=False) - # sub-directory under cache-dir for directories created by "makedir" + # Sub-directory under cache-dir for directories created by `mkdir()`. _CACHE_PREFIX_DIRS = "d" - # sub-directory under cache-dir for values created by "set" + # Sub-directory under cache-dir for values created by `set()`. _CACHE_PREFIX_VALUES = "v" def __init__( @@ -120,13 +119,15 @@ class Cache: stacklevel=3, ) - def makedir(self, name: str) -> py.path.local: + def mkdir(self, name: str) -> Path: """Return a directory path object with the given name. 
If the directory does not yet exist, it will be created. You can use it to manage files to e.g. store/retrieve database dumps across test sessions. + .. versionadded:: 7.0 + :param name: Must be a string not containing a ``/`` separator. Make sure the name contains your plugin or application @@ -137,7 +138,7 @@ class Cache: raise ValueError("name is not allowed to contain path separators") res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) res.mkdir(exist_ok=True, parents=True) - return py.path.local(res) + return res def _getvaluepath(self, key: str) -> Path: return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) @@ -183,7 +184,7 @@ class Cache: return if not cache_dir_exists_already: self._ensure_supporting_files() - data = json.dumps(value, indent=2, sort_keys=True) + data = json.dumps(value, indent=2) try: f = path.open("w") except OSError: @@ -218,13 +219,17 @@ class LFPluginCollWrapper: # Sort any lf-paths to the beginning. lf_paths = self.lfplugin._last_failed_paths + res.result = sorted( - res.result, key=lambda x: 0 if Path(str(x.fspath)) in lf_paths else 1, + res.result, + # use stable sort to priorize last failed + key=lambda x: x.path in lf_paths, + reverse=True, ) return elif isinstance(collector, Module): - if Path(str(collector.fspath)) in self.lfplugin._last_failed_paths: + if collector.path in self.lfplugin._last_failed_paths: out = yield res = out.get_result() result = res.result @@ -245,7 +250,7 @@ class LFPluginCollWrapper: for x in result if x.nodeid in lastfailed # Include any passed arguments (not trivial to filter). - or session.isinitpath(x.fspath) + or session.isinitpath(x.path) # Keep all sub-collectors. or isinstance(x, nodes.Collector) ] @@ -265,7 +270,7 @@ class LFPluginCollSkipfiles: # test-bearing paths and doesn't try to include the paths of their # packages, so don't filter them. if isinstance(collector, Module) and not isinstance(collector, Package): - if Path(str(collector.fspath)) not in self.lfplugin._last_failed_paths: + if collector.path not in self.lfplugin._last_failed_paths: self.lfplugin._skipped_files += 1 return CollectReport( @@ -414,7 +419,7 @@ class NFPlugin: self.cached_nodeids.update(item.nodeid for item in items) def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]: - return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) # type: ignore[no-any-return] + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return] def pytest_sessionfinish(self) -> None: config = self.config @@ -567,8 +572,8 @@ def cacheshow(config: Config, session: Session) -> int: contents = sorted(ddir.rglob(glob)) tw.sep("-", "cache directories for %r" % glob) for p in contents: - # if p.check(dir=1): - # print("%s/" % p.relto(basedir)) + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) if p.is_file(): key = str(p.relative_to(basedir)) tw.line(f"{key} is a file of length {p.stat().st_size:d}") diff --git a/contrib/python/pytest/py3/_pytest/capture.py b/contrib/python/pytest/py3/_pytest/capture.py index 086302658c..884f035e29 100644 --- a/contrib/python/pytest/py3/_pytest/capture.py +++ b/contrib/python/pytest/py3/_pytest/capture.py @@ -68,30 +68,6 @@ def _colorama_workaround() -> None: pass -def _readline_workaround() -> None: - """Ensure readline is imported so that it attaches to the correct stdio - handles on Windows. 
- - Pdb uses readline support where available--when not running from the Python - prompt, the readline module is not imported until running the pdb REPL. If - running pytest with the --pdb option this means the readline module is not - imported until after I/O capture has been started. - - This is a problem for pyreadline, which is often used to implement readline - support on Windows, as it does not attach to the correct handles for stdout - and/or stdin if they have been redirected by the FDCapture mechanism. This - workaround ensures that readline is imported before I/O capture is setup so - that it can attach to the actual stdin/out for the console. - - See https://github.com/pytest-dev/pytest/pull/1281. - """ - if sys.platform.startswith("win32"): - try: - import readline # noqa: F401 - except ImportError: - pass - - def _py36_windowsconsoleio_workaround(stream: TextIO) -> None: """Workaround for Windows Unicode console handling on Python>=3.6. @@ -154,7 +130,6 @@ def pytest_load_initial_conftests(early_config: Config): if ns.capture == "fd": _py36_windowsconsoleio_workaround(sys.stdout) _colorama_workaround() - _readline_workaround() pluginmanager = early_config.pluginmanager capman = CaptureManager(ns.capture) pluginmanager.register(capman, "capturemanager") @@ -363,7 +338,7 @@ class FDCaptureBinary: except OSError: # FD capturing is conceptually simple -- create a temporary file, # redirect the FD to it, redirect back when done. But when the - # target FD is invalid it throws a wrench into this loveley scheme. + # target FD is invalid it throws a wrench into this lovely scheme. # # Tests themselves shouldn't care if the FD is valid, FD capturing # should work regardless of external circumstances. So falling back @@ -556,7 +531,11 @@ class MultiCapture(Generic[AnyStr]): def __repr__(self) -> str: return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format( - self.out, self.err, self.in_, self._state, self._in_suspended, + self.out, + self.err, + self.in_, + self._state, + self._in_suspended, ) def start_capturing(self) -> None: @@ -614,14 +593,8 @@ class MultiCapture(Generic[AnyStr]): return self._state == "started" def readouterr(self) -> CaptureResult[AnyStr]: - if self.out: - out = self.out.snap() - else: - out = "" - if self.err: - err = self.err.snap() - else: - err = "" + out = self.out.snap() if self.out else "" + err = self.err.snap() if self.err else "" return CaptureResult(out, err) @@ -843,7 +816,9 @@ class CaptureFixture(Generic[AnyStr]): def _start(self) -> None: if self._capture is None: self._capture = MultiCapture( - in_=None, out=self.captureclass(1), err=self.captureclass(2), + in_=None, + out=self.captureclass(1), + err=self.captureclass(2), ) self._capture.start_capturing() diff --git a/contrib/python/pytest/py3/_pytest/compat.py b/contrib/python/pytest/py3/_pytest/compat.py index c23cc962ce..25894d344d 100644 --- a/contrib/python/pytest/py3/_pytest/compat.py +++ b/contrib/python/pytest/py3/_pytest/compat.py @@ -2,7 +2,7 @@ import enum import functools import inspect -import re +import os import sys from contextlib import contextmanager from inspect import Parameter @@ -18,9 +18,7 @@ from typing import TypeVar from typing import Union import attr - -from _pytest.outcomes import fail -from _pytest.outcomes import TEST_OUTCOME +import py if TYPE_CHECKING: from typing import NoReturn @@ -30,6 +28,19 @@ if TYPE_CHECKING: _T = TypeVar("_T") _S = TypeVar("_S") +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# 
intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. local +# fmt: on + + +def legacy_path(path: Union[str, "os.PathLike[str]"]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + # fmt: off # Singleton type for NOTSET, as described in: @@ -49,10 +60,6 @@ def _format_args(func: Callable[..., Any]) -> str: return str(signature(func)) -# The type of re.compile objects is not exposed in Python. -REGEX_TYPE = type(re.compile("")) - - def is_generator(func: object) -> bool: genfunc = inspect.isgeneratorfunction(func) return genfunc and not iscoroutinefunction(func) @@ -142,8 +149,11 @@ def getfuncargnames( try: parameters = signature(function).parameters except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + fail( - f"Could not determine arguments of {function!r}: {e}", pytrace=False, + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, ) arg_names = tuple( @@ -162,7 +172,12 @@ def getfuncargnames( # it's passed as an unbound method or function, remove the first # parameter name. if is_method or ( - cls and not isinstance(cls.__dict__.get(name, None), staticmethod) + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) ): arg_names = arg_names[1:] # Remove any names that will be replaced with mocks. @@ -308,6 +323,8 @@ def safe_getattr(object: Any, name: str, default: Any) -> Any: are derived from BaseException instead of Exception (for more details check #2707). """ + from _pytest.outcomes import TEST_OUTCOME + try: return getattr(object, name, default) except TEST_OUTCOME: @@ -397,4 +414,4 @@ else: # # This also work for Enums (if you use `is` to compare) and Literals. 
def assert_never(value: "NoReturn") -> "NoReturn": - assert False, "Unhandled value: {} ({})".format(value, type(value).__name__) + assert False, f"Unhandled value: {value} ({type(value).__name__})" diff --git a/contrib/python/pytest/py3/_pytest/config/__init__.py b/contrib/python/pytest/py3/_pytest/config/__init__.py index bd9e2883f9..ebf6e1b950 100644 --- a/contrib/python/pytest/py3/_pytest/config/__init__.py +++ b/contrib/python/pytest/py3/_pytest/config/__init__.py @@ -13,9 +13,11 @@ import types import warnings from functools import lru_cache from pathlib import Path +from textwrap import dedent from types import TracebackType from typing import Any from typing import Callable +from typing import cast from typing import Dict from typing import Generator from typing import IO @@ -32,7 +34,6 @@ from typing import TYPE_CHECKING from typing import Union import attr -import py from pluggy import HookimplMarker from pluggy import HookspecMarker from pluggy import PluginManager @@ -50,10 +51,12 @@ from _pytest.compat import final from _pytest.compat import importlib_metadata from _pytest.outcomes import fail from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath from _pytest.pathlib import bestrelpath from _pytest.pathlib import import_path from _pytest.pathlib import ImportMode -from _pytest.store import Store +from _pytest.pathlib import resolve_package_path +from _pytest.stash import Stash from _pytest.warning_types import PytestConfigWarning if TYPE_CHECKING: @@ -103,7 +106,7 @@ class ExitCode(enum.IntEnum): class ConftestImportFailure(Exception): def __init__( self, - path: py.path.local, + path: Path, excinfo: Tuple[Type[Exception], Exception, TracebackType], ) -> None: super().__init__(path, excinfo) @@ -128,7 +131,7 @@ def filter_traceback_for_conftest_import_failure( def main( - args: Optional[Union[List[str], py.path.local]] = None, + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, ) -> Union[int, ExitCode]: """Perform an in-process test run. @@ -142,7 +145,7 @@ def main( try: config = _prepareconfig(args, plugins) except ConftestImportFailure as e: - exc_info = ExceptionInfo(e.excinfo) + exc_info = ExceptionInfo.from_exc_info(e.excinfo) tw = TerminalWriter(sys.stderr) tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) exc_info.traceback = exc_info.traceback.filter( @@ -235,6 +238,7 @@ default_plugins = essential_plugins + ( "unittest", "capture", "skipping", + "legacypath", "tmpdir", "monkeypatch", "recwarn", @@ -251,6 +255,7 @@ default_plugins = essential_plugins + ( "warnings", "logging", "reports", + "python_path", *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), "faulthandler", ) @@ -269,7 +274,9 @@ def get_config( config = Config( pluginmanager, invocation_params=Config.InvocationParams( - args=args or (), plugins=plugins, dir=Path.cwd(), + args=args or (), + plugins=plugins, + dir=Path.cwd(), ), ) @@ -285,7 +292,7 @@ def get_config( def get_plugin_manager() -> "PytestPluginManager": """Obtain a new instance of the - :py:class:`_pytest.config.PytestPluginManager`, with default plugins + :py:class:`pytest.PytestPluginManager`, with default plugins already loaded. 
This function can be used by integration with other tools, like hooking @@ -295,13 +302,13 @@ def get_plugin_manager() -> "PytestPluginManager": def _prepareconfig( - args: Optional[Union[py.path.local, List[str]]] = None, + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, ) -> "Config": if args is None: args = sys.argv[1:] - elif isinstance(args, py.path.local): - args = [str(args)] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] elif not isinstance(args, list): msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})" raise TypeError(msg.format(args, type(args))) @@ -324,6 +331,14 @@ def _prepareconfig( raise +def _get_directory(path: Path) -> Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + @final class PytestPluginManager(PluginManager): """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with @@ -342,11 +357,17 @@ class PytestPluginManager(PluginManager): self._conftest_plugins: Set[types.ModuleType] = set() # State related to local conftest plugins. - self._dirpath2confmods: Dict[py.path.local, List[types.ModuleType]] = {} + self._dirpath2confmods: Dict[Path, List[types.ModuleType]] = {} self._conftestpath2mod: Dict[Path, types.ModuleType] = {} - self._confcutdir: Optional[py.path.local] = None + self._confcutdir: Optional[Path] = None self._noconftest = False - self._duplicatepaths: Set[py.path.local] = set() + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + self._duplicatepaths: Set[Path] = set() # plugins that were explicitly skipped with pytest.skip # list of (module name, skip reason) @@ -362,7 +383,10 @@ class PytestPluginManager(PluginManager): encoding: str = getattr(err, "encoding", "utf8") try: err = open( - os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding, + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, ) except Exception: pass @@ -471,7 +495,9 @@ class PytestPluginManager(PluginManager): # # Internal API for local conftest plugin handling. # - def _set_initial_conftests(self, namespace: argparse.Namespace) -> None: + def _set_initial_conftests( + self, namespace: argparse.Namespace, rootpath: Path + ) -> None: """Load initial conftest files given a preparsed "namespace". As conftest files may add their own command line options which have @@ -479,9 +505,9 @@ class PytestPluginManager(PluginManager): All builtin and 3rd party plugins will have been loaded, however, so common options will not confuse our logic here. 
""" - current = py.path.local() + current = Path.cwd() self._confcutdir = ( - current.join(namespace.confcutdir, abs=True) + absolutepath(current / namespace.confcutdir) if namespace.confcutdir else None ) @@ -495,53 +521,60 @@ class PytestPluginManager(PluginManager): i = path.find("::") if i != -1: path = path[:i] - anchor = current.join(path, abs=1) + anchor = absolutepath(current / path) if anchor.exists(): # we found some file object - self._try_load_conftest(anchor, namespace.importmode) + self._try_load_conftest(anchor, namespace.importmode, rootpath) foundanchor = True if not foundanchor: - self._try_load_conftest(current, namespace.importmode) + self._try_load_conftest(current, namespace.importmode, rootpath) def _try_load_conftest( - self, anchor: py.path.local, importmode: Union[str, ImportMode] + self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path ) -> None: - self._getconftestmodules(anchor, importmode) + self._getconftestmodules(anchor, importmode, rootpath) # let's also consider test* subdirs - if anchor.check(dir=1): - for x in anchor.listdir("test*"): - if x.check(dir=1): - self._getconftestmodules(x, importmode) + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._getconftestmodules(x, importmode, rootpath) - @lru_cache(maxsize=128) def _getconftestmodules( - self, path: py.path.local, importmode: Union[str, ImportMode], + self, path: Path, importmode: Union[str, ImportMode], rootpath: Path ) -> List[types.ModuleType]: if self._noconftest: return [] - if path.isfile(): - directory = path.dirpath() - else: - directory = path + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + existing_clist = self._dirpath2confmods.get(directory) + if existing_clist is not None: + return existing_clist # XXX these days we may rather want to use config.rootpath # and allow users to opt into looking into the rootdir parent # directories instead of requiring to specify confcutdir. 
clist = [] - for parent in directory.parts(): - if self._confcutdir and self._confcutdir.relto(parent): + confcutdir_parents = self._confcutdir.parents if self._confcutdir else [] + for parent in reversed((directory, *directory.parents)): + if parent in confcutdir_parents: continue - conftestpath = parent.join("conftest.py") - if conftestpath.isfile(): - mod = self._importconftest(conftestpath, importmode) + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest(conftestpath, importmode, rootpath) clist.append(mod) self._dirpath2confmods[directory] = clist return clist def _rget_with_confmod( - self, name: str, path: py.path.local, importmode: Union[str, ImportMode], + self, + name: str, + path: Path, + importmode: Union[str, ImportMode], + rootpath: Path, ) -> Tuple[types.ModuleType, Any]: - modules = self._getconftestmodules(path, importmode) + modules = self._getconftestmodules(path, importmode, rootpath=rootpath) for mod in reversed(modules): try: return mod, getattr(mod, name) @@ -550,24 +583,24 @@ class PytestPluginManager(PluginManager): raise KeyError(name) def _importconftest( - self, conftestpath: py.path.local, importmode: Union[str, ImportMode], + self, conftestpath: Path, importmode: Union[str, ImportMode], rootpath: Path ) -> types.ModuleType: # Use a resolved Path object as key to avoid loading the same conftest # twice with build systems that create build directories containing # symlinks to actual files. # Using Path().resolve() is better than py.path.realpath because # it resolves to the correct path/drive in case-insensitive file systems (#5792) - key = Path(str(conftestpath)).resolve() + key = conftestpath.resolve() with contextlib.suppress(KeyError): return self._conftestpath2mod[key] - pkgpath = conftestpath.pypkgpath() + pkgpath = resolve_package_path(conftestpath) if pkgpath is None: - _ensure_removed_sysmodule(conftestpath.purebasename) + _ensure_removed_sysmodule(conftestpath.stem) try: - mod = import_path(conftestpath, mode=importmode) + mod = import_path(conftestpath, mode=importmode, root=rootpath) except Exception as e: assert e.__traceback__ is not None exc_info = (type(e), e, e.__traceback__) @@ -577,10 +610,10 @@ class PytestPluginManager(PluginManager): self._conftest_plugins.add(mod) self._conftestpath2mod[key] = mod - dirpath = conftestpath.dirpath() + dirpath = conftestpath.parent if dirpath in self._dirpath2confmods: for path, mods in self._dirpath2confmods.items(): - if path and path.relto(dirpath) or path == dirpath: + if path and dirpath in path.parents or path == dirpath: assert mod not in mods mods.append(mod) self.trace(f"loading conftestmodule {mod!r}") @@ -588,7 +621,9 @@ class PytestPluginManager(PluginManager): return mod def _check_non_top_pytest_plugins( - self, mod: types.ModuleType, conftestpath: py.path.local, + self, + mod: types.ModuleType, + conftestpath: Path, ) -> None: if ( hasattr(mod, "pytest_plugins") @@ -614,6 +649,7 @@ class PytestPluginManager(PluginManager): def consider_preparse( self, args: Sequence[str], *, exclude_only: bool = False ) -> None: + """:meta private:""" i = 0 n = len(args) while i < n: @@ -635,6 +671,7 @@ class PytestPluginManager(PluginManager): self.consider_pluginarg(parg) def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" if arg.startswith("no:"): name = arg[3:] if name in essential_plugins: @@ -660,12 +697,15 @@ class PytestPluginManager(PluginManager): self.import_plugin(arg, consider_entry_points=True) def consider_conftest(self, 
conftestmodule: types.ModuleType) -> None: + """:meta private:""" self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self) -> None: + """:meta private:""" self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) def consider_module(self, mod: types.ModuleType) -> None: + """:meta private:""" self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) def _import_plugin_specs( @@ -703,7 +743,7 @@ class PytestPluginManager(PluginManager): __import__(importspec) except ImportError as e: raise ImportError( - 'Error importing plugin "{}": {}'.format(modname, str(e.args[0])) + f'Error importing plugin "{modname}": {e.args[0]}' ).with_traceback(e.__traceback__) from e except Skipped as e: @@ -823,6 +863,7 @@ class Config: """Access to configuration values, pluginmanager and plugin hooks. :param PytestPluginManager pluginmanager: + A pytest PluginManager. :param InvocationParams invocation_params: Object containing parameters regarding the :func:`pytest.main` @@ -830,7 +871,7 @@ class Config: """ @final - @attr.s(frozen=True) + @attr.s(frozen=True, auto_attribs=True) class InvocationParams: """Holds parameters passed during :func:`pytest.main`. @@ -846,21 +887,12 @@ class Config: Plugins accessing ``InvocationParams`` must be aware of that. """ - args = attr.ib(type=Tuple[str, ...], converter=_args_converter) - """The command-line arguments as passed to :func:`pytest.main`. - - :type: Tuple[str, ...] - """ - plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]]) - """Extra plugins, might be `None`. - - :type: Optional[Sequence[Union[str, plugin]]] - """ - dir = attr.ib(type=Path) - """The directory from which :func:`pytest.main` was invoked. - - :type: pathlib.Path - """ + args: Tuple[str, ...] = attr.ib(converter=_args_converter) + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] + """Extra plugins, might be `None`.""" + dir: Path + """The directory from which :func:`pytest.main` was invoked.""" def __init__( self, @@ -891,6 +923,7 @@ class Config: self._parser = Parser( usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", processopt=self._processopt, + _ispytest=True, ) self.pluginmanager = pluginmanager """The plugin manager handles plugin registration and hook invocation. @@ -898,15 +931,23 @@ class Config: :type: PytestPluginManager """ + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + from .compat import PathAwareHookProxy + self.trace = self.pluginmanager.trace.root.get("config") - self.hook = self.pluginmanager.hook + self.hook = PathAwareHookProxy(self.pluginmanager.hook) self._inicache: Dict[str, Any] = {} self._override_ini: Sequence[str] = () self._opt2dest: Dict[str, str] = {} self._cleanup: List[Callable[[], None]] = [] - # A place where plugins can store information on the config for their - # own use. Currently only intended for internal plugins. - self._store = Store() self.pluginmanager.register(self, "pytestconfig") self._configured = False self.hook.pytest_addoption.call_historic( @@ -919,17 +960,6 @@ class Config: self.cache: Optional[Cache] = None @property - def invocation_dir(self) -> py.path.local: - """The directory from which pytest was invoked. - - Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`, - which is a :class:`pathlib.Path`. 
- - :type: py.path.local - """ - return py.path.local(str(self.invocation_params.dir)) - - @property def rootpath(self) -> Path: """The path to the :ref:`rootdir <rootdir>`. @@ -940,16 +970,6 @@ class Config: return self._rootpath @property - def rootdir(self) -> py.path.local: - """The path to the :ref:`rootdir <rootdir>`. - - Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. - - :type: py.path.local - """ - return py.path.local(str(self.rootpath)) - - @property def inipath(self) -> Optional[Path]: """The path to the :ref:`configfile <configfiles>`. @@ -959,19 +979,9 @@ class Config: """ return self._inipath - @property - def inifile(self) -> Optional[py.path.local]: - """The path to the :ref:`configfile <configfiles>`. - - Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. - - :type: Optional[py.path.local] - """ - return py.path.local(str(self.inipath)) if self.inipath else None - def add_cleanup(self, func: Callable[[], None]) -> None: """Add a function to be called when the config object gets out of - use (usually coninciding with pytest_unconfigure).""" + use (usually coinciding with pytest_unconfigure).""" self._cleanup.append(func) def _do_configure(self) -> None: @@ -1067,7 +1077,9 @@ class Config: @hookimpl(trylast=True) def pytest_load_initial_conftests(self, early_config: "Config") -> None: - self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + self.pluginmanager._set_initial_conftests( + early_config.known_args_namespace, rootpath=early_config.rootpath + ) def _initini(self, args: Sequence[str]) -> None: ns, unknown_args = self._parser.parse_known_and_unknown_args( @@ -1204,8 +1216,8 @@ class Config: @hookimpl(hookwrapper=True) def pytest_collection(self) -> Generator[None, None, None]: - """Validate invalid ini keys after collection is done so we take in account - options added by late-loading conftest files.""" + # Validate invalid ini keys after collection is done so we take in account + # options added by late-loading conftest files. yield self._validate_config_options() @@ -1225,7 +1237,11 @@ class Config: if Version(minver) > Version(pytest.__version__): raise pytest.UsageError( "%s: 'minversion' requires pytest-%s, actual pytest-%s'" - % (self.inipath, minver, pytest.__version__,) + % ( + self.inipath, + minver, + pytest.__version__, + ) ) def _validate_config_options(self) -> None: @@ -1247,14 +1263,16 @@ class Config: missing_plugins = [] for required_plugin in required_plugins: try: - spec = Requirement(required_plugin) + req = Requirement(required_plugin) except InvalidRequirement: missing_plugins.append(required_plugin) continue - if spec.name not in plugin_dist_info: + if req.name not in plugin_dist_info: missing_plugins.append(required_plugin) - elif Version(plugin_dist_info[spec.name]) not in spec.specifier: + elif not req.specifier.contains( + Version(plugin_dist_info[req.name]), prereleases=True + ): missing_plugins.append(required_plugin) if missing_plugins: @@ -1352,8 +1370,8 @@ class Config: """Return configuration value from an :ref:`ini file <configfiles>`. If the specified name hasn't been registered through a prior - :py:func:`parser.addini <_pytest.config.argparsing.Parser.addini>` - call (usually from a plugin), a ValueError is raised. + :func:`parser.addini <pytest.Parser.addini>` call (usually from a + plugin), a ValueError is raised. 
""" try: return self._inicache[name] @@ -1361,6 +1379,12 @@ class Config: self._inicache[name] = val = self._getini(name) return val + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: Union[str, List[str]]): + msg = f"unknown configuration type: {type}" + raise ValueError(msg, value) # pragma: no cover + def _getini(self, name: str): try: description, type, default = self._parser._inidict[name] @@ -1393,12 +1417,12 @@ class Config: # a_line_list = ["tests", "acceptance"] # in this case, we already have a list ready to use. # - if type == "pathlist": + if type == "paths": # TODO: This assert is probably not valid in all cases. assert self.inipath is not None dp = self.inipath.parent input_values = shlex.split(value) if isinstance(value, str) else value - return [py.path.local(str(dp / x)) for x in input_values] + return [dp / x for x in input_values] elif type == "args": return shlex.split(value) if isinstance(value, str) else value elif type == "linelist": @@ -1408,25 +1432,30 @@ class Config: return value elif type == "bool": return _strtobool(str(value).strip()) - else: - assert type in [None, "string"] + elif type == "string": + return value + elif type is None: return value + else: + return self._getini_unknown_type(name, type, value) def _getconftest_pathlist( - self, name: str, path: py.path.local - ) -> Optional[List[py.path.local]]: + self, name: str, path: Path, rootpath: Path + ) -> Optional[List[Path]]: try: mod, relroots = self.pluginmanager._rget_with_confmod( - name, path, self.getoption("importmode") + name, path, self.getoption("importmode"), rootpath ) except KeyError: return None - modpath = py.path.local(mod.__file__).dirpath() - values: List[py.path.local] = [] + modpath = Path(mod.__file__).parent + values: List[Path] = [] for relroot in relroots: - if not isinstance(relroot, py.path.local): + if isinstance(relroot, os.PathLike): + relroot = Path(relroot) + else: relroot = relroot.replace("/", os.sep) - relroot = modpath.join(relroot, abs=True) + relroot = absolutepath(modpath / relroot) values.append(relroot) return values @@ -1498,7 +1527,8 @@ class Config: "(are you using python -O?)\n" ) self.issue_config_time_warning( - PytestConfigWarning(warning_text), stacklevel=3, + PytestConfigWarning(warning_text), + stacklevel=3, ) def _warn_about_skipped_plugins(self) -> None: @@ -1566,17 +1596,54 @@ def parse_warning_filter( ) -> Tuple[str, str, Type[Warning], str, int]: """Parse a warnings filter string. - This is copied from warnings._setoption, but does not apply the filter, - only parses it, and makes the escaping optional. + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. 
""" + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + parts = arg.split(":") if len(parts) > 5: - raise warnings._OptionError(f"too many fields (max 5): {arg!r}") + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + while len(parts) < 5: parts.append("") - action_, message, category_, module, lineno_ = [s.strip() for s in parts] - action: str = warnings._getaction(action_) # type: ignore[attr-defined] - category: Type[Warning] = warnings._getcategory(category_) # type: ignore[attr-defined] + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: str = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) + try: + category: Type[Warning] = _resolve_warning_category(category_) + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) if message and escape: message = re.escape(message) if module and escape: @@ -1585,14 +1652,38 @@ def parse_warning_filter( try: lineno = int(lineno_) if lineno < 0: - raise ValueError - except (ValueError, OverflowError) as e: - raise warnings._OptionError(f"invalid lineno {lineno_!r}") from e + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) else: lineno = 0 return action, message, category, module, lineno +def _resolve_warning_category(category: str) -> Type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." 
not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(Type[Warning], cat) + + def apply_warning_filters( config_filters: Iterable[str], cmdline_filters: Iterable[str] ) -> None: diff --git a/contrib/python/pytest/py3/_pytest/config/argparsing.py b/contrib/python/pytest/py3/_pytest/config/argparsing.py index 9a48196552..b0bb3f168f 100644 --- a/contrib/python/pytest/py3/_pytest/config/argparsing.py +++ b/contrib/python/pytest/py3/_pytest/config/argparsing.py @@ -1,4 +1,5 @@ import argparse +import os import sys import warnings from gettext import gettext @@ -14,11 +15,13 @@ from typing import Tuple from typing import TYPE_CHECKING from typing import Union -import py - import _pytest._io from _pytest.compat import final from _pytest.config.exceptions import UsageError +from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT +from _pytest.deprecated import ARGUMENT_TYPE_STR +from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE +from _pytest.deprecated import check_ispytest if TYPE_CHECKING: from typing import NoReturn @@ -41,8 +44,11 @@ class Parser: self, usage: Optional[str] = None, processopt: Optional[Callable[["Argument"], None]] = None, + *, + _ispytest: bool = False, ) -> None: - self._anonymous = OptionGroup("custom options", parser=self) + check_ispytest(_ispytest) + self._anonymous = OptionGroup("custom options", parser=self, _ispytest=True) self._groups: List[OptionGroup] = [] self._processopt = processopt self._usage = usage @@ -65,14 +71,14 @@ class Parser: :after: Name of another group, used for ordering --help output. The returned group object has an ``addoption`` method with the same - signature as :py:func:`parser.addoption - <_pytest.config.argparsing.Parser.addoption>` but will be shown in the - respective group in the output of ``pytest. --help``. + signature as :func:`parser.addoption <pytest.Parser.addoption>` but + will be shown in the respective group in the output of + ``pytest. --help``. 
""" for group in self._groups: if group.name == name: return group - group = OptionGroup(name, description, parser=self) + group = OptionGroup(name, description, parser=self, _ispytest=True) i = 0 for i, grp in enumerate(self._groups): if grp.name == after: @@ -97,14 +103,14 @@ class Parser: def parse( self, - args: Sequence[Union[str, py.path.local]], + args: Sequence[Union[str, "os.PathLike[str]"]], namespace: Optional[argparse.Namespace] = None, ) -> argparse.Namespace: from _pytest._argcomplete import try_argcomplete self.optparser = self._getparser() try_argcomplete(self.optparser) - strargs = [str(x) if isinstance(x, py.path.local) else x for x in args] + strargs = [os.fspath(x) for x in args] return self.optparser.parse_args(strargs, namespace=namespace) def _getparser(self) -> "MyOptionParser": @@ -128,7 +134,7 @@ class Parser: def parse_setoption( self, - args: Sequence[Union[str, py.path.local]], + args: Sequence[Union[str, "os.PathLike[str]"]], option: argparse.Namespace, namespace: Optional[argparse.Namespace] = None, ) -> List[str]: @@ -139,7 +145,7 @@ class Parser: def parse_known_args( self, - args: Sequence[Union[str, py.path.local]], + args: Sequence[Union[str, "os.PathLike[str]"]], namespace: Optional[argparse.Namespace] = None, ) -> argparse.Namespace: """Parse and return a namespace object with known arguments at this point.""" @@ -147,13 +153,13 @@ class Parser: def parse_known_and_unknown_args( self, - args: Sequence[Union[str, py.path.local]], + args: Sequence[Union[str, "os.PathLike[str]"]], namespace: Optional[argparse.Namespace] = None, ) -> Tuple[argparse.Namespace, List[str]]: """Parse and return a namespace object with known arguments, and the remaining arguments unknown at this point.""" optparser = self._getparser() - strargs = [str(x) if isinstance(x, py.path.local) else x for x in args] + strargs = [os.fspath(x) for x in args] return optparser.parse_known_args(strargs, namespace=namespace) def addini( @@ -161,22 +167,35 @@ class Parser: name: str, help: str, type: Optional[ - "Literal['string', 'pathlist', 'args', 'linelist', 'bool']" + "Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']" ] = None, default=None, ) -> None: """Register an ini-file option. - :name: Name of the ini-variable. - :type: Type of the variable, can be ``string``, ``pathlist``, ``args``, - ``linelist`` or ``bool``. Defaults to ``string`` if ``None`` or - not passed. - :default: Default value if no ini-file option exists but is queried. + :name: + Name of the ini-variable. + :type: + Type of the variable. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + + .. versionadded:: 7.0 + The ``paths`` variable type. + + Defaults to ``string`` if ``None`` or not passed. + :default: + Default value if no ini-file option exists but is queried. The value of ini-variables can be retrieved via a call to - :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + :py:func:`config.getini(name) <pytest.Config.getini>`. 
""" - assert type in (None, "string", "pathlist", "args", "linelist", "bool") + assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool") self._inidict[name] = (help, type, default) self._ininames.append(name) @@ -213,12 +232,7 @@ class Argument: self._short_opts: List[str] = [] self._long_opts: List[str] = [] if "%default" in (attrs.get("help") or ""): - warnings.warn( - 'pytest now uses argparse. "%default" should be' - ' changed to "%(default)s" ', - DeprecationWarning, - stacklevel=3, - ) + warnings.warn(ARGUMENT_PERCENT_DEFAULT, stacklevel=3) try: typ = attrs["type"] except KeyError: @@ -228,11 +242,7 @@ class Argument: if isinstance(typ, str): if typ == "choice": warnings.warn( - "`type` argument to addoption() is the string %r." - " For choices this is optional and can be omitted, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, + ARGUMENT_TYPE_STR_CHOICE.format(typ=typ, names=names), stacklevel=4, ) # argparse expects a type here take it from @@ -240,11 +250,7 @@ class Argument: attrs["type"] = type(attrs["choices"][0]) else: warnings.warn( - "`type` argument to addoption() is the string %r, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, - stacklevel=4, + ARGUMENT_TYPE_STR.format(typ=typ, names=names), stacklevel=4 ) attrs["type"] = Argument._typ_map[typ] # Used in test_parseopt -> test_parse_defaultgetter. @@ -332,9 +338,17 @@ class Argument: class OptionGroup: + """A group of options shown in its own section.""" + def __init__( - self, name: str, description: str = "", parser: Optional[Parser] = None + self, + name: str, + description: str = "", + parser: Optional[Parser] = None, + *, + _ispytest: bool = False, ) -> None: + check_ispytest(_ispytest) self.name = name self.description = description self.options: List[Argument] = [] @@ -344,9 +358,9 @@ class OptionGroup: """Add an option to this group. If a shortened version of a long option is specified, it will - be suppressed in the help. addoption('--twowords', '--two-words') - results in help showing '--two-words' only, but --twowords gets - accepted **and** the automatic destination is in args.twowords. + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. 
""" conflict = set(optnames).intersection( name for opt in self.options for name in opt.names() @@ -378,8 +392,7 @@ class MyOptionParser(argparse.ArgumentParser): prog: Optional[str] = None, ) -> None: self._parser = parser - argparse.ArgumentParser.__init__( - self, + super().__init__( prog=prog, usage=parser._usage, add_help=False, @@ -472,7 +485,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): super().__init__(*args, **kwargs) def _format_action_invocation(self, action: argparse.Action) -> str: - orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + orgstr = super()._format_action_invocation(action) if orgstr and orgstr[0] != "-": # only optional arguments return orgstr res: Optional[str] = getattr(action, "_formatted_action_invocation", None) diff --git a/contrib/python/pytest/py3/_pytest/config/compat.py b/contrib/python/pytest/py3/_pytest/config/compat.py new file mode 100644 index 0000000000..ba267d2150 --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/config/compat.py @@ -0,0 +1,71 @@ +import functools +import warnings +from pathlib import Path +from typing import Optional + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG +from _pytest.nodes import _check_path + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_caller): + self.__hook_caller = hook_caller + + def __dir__(self): + return dir(self.__hook_caller) + + def __getattr__(self, key, _wraps=functools.wraps): + hook = getattr(self.__hook_caller, key) + if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @_wraps(hook) + def fixed_hook(**kw): + + path_value: Optional[Path] = kw.pop(path_var, None) + fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None) + if fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook diff --git a/contrib/python/pytest/py3/_pytest/config/findpaths.py b/contrib/python/pytest/py3/_pytest/config/findpaths.py index 2edf54536b..89ade5f23b 100644 --- a/contrib/python/pytest/py3/_pytest/config/findpaths.py +++ b/contrib/python/pytest/py3/_pytest/config/findpaths.py @@ -64,9 +64,13 @@ def load_config_dict_from_file( # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. 
elif filepath.suffix == ".toml": - import toml + import tomli - config = toml.load(str(filepath)) + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomli.loads(toml_text) + except tomli.TOMLDecodeError as exc: + raise UsageError(str(exc)) from exc result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) if result is not None: @@ -83,9 +87,7 @@ def load_config_dict_from_file( def locate_config( args: Iterable[Path], -) -> Tuple[ - Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]], -]: +) -> Tuple[Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]]]: """Search in the list of arguments for a valid ini-file for pytest, and return a tuple of (rootdir, inifile, cfg-dict).""" config_names = [ @@ -178,7 +180,7 @@ def determine_setup( inipath: Optional[Path] = inipath_ inicfg = load_config_dict_from_file(inipath_) or {} if rootdir_cmd_arg is None: - rootdir = get_common_ancestor(dirs) + rootdir = inipath_.parent else: ancestor = get_common_ancestor(dirs) rootdir, inipath, inicfg = locate_config([ancestor]) diff --git a/contrib/python/pytest/py3/_pytest/debugging.py b/contrib/python/pytest/py3/_pytest/debugging.py index b52840006b..eb51eddbe4 100644 --- a/contrib/python/pytest/py3/_pytest/debugging.py +++ b/contrib/python/pytest/py3/_pytest/debugging.py @@ -90,7 +90,7 @@ def pytest_addoption(parser: Parser) -> None: dest="usepdb_cls", metavar="modulename:classname", type=_validate_usepdb_cls, - help="start a custom interactive Python debugger on errors. " + help="specify a custom interactive Python debugger for use with --pdb." "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", ) group._addoption( @@ -125,7 +125,7 @@ def pytest_configure(config: Config) -> None: pytestPDB._config, ) = pytestPDB._saved.pop() - config._cleanup.append(fin) + config.add_cleanup(fin) class pytestPDB: diff --git a/contrib/python/pytest/py3/_pytest/deprecated.py b/contrib/python/pytest/py3/_pytest/deprecated.py index 19b31d6653..5248927113 100644 --- a/contrib/python/pytest/py3/_pytest/deprecated.py +++ b/contrib/python/pytest/py3/_pytest/deprecated.py @@ -11,6 +11,8 @@ in case of warnings which need to format their messages. from warnings import warn from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn7Warning +from _pytest.warning_types import PytestRemovedIn8Warning from _pytest.warning_types import UnformattedWarning # set of plugins which have been integrated into the core; we use this list to ignore @@ -23,47 +25,111 @@ DEPRECATED_EXTERNAL_PLUGINS = { FILLFUNCARGS = UnformattedWarning( - PytestDeprecationWarning, + PytestRemovedIn7Warning, "{name} is deprecated, use " "function._request._fillfixtures() instead if you cannot avoid reaching into internals.", ) PYTEST_COLLECT_MODULE = UnformattedWarning( - PytestDeprecationWarning, + PytestRemovedIn7Warning, "pytest.collect.{name} was moved to pytest.{name}\n" "Please update to the new name.", ) +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". YIELD_FIXTURE = PytestDeprecationWarning( "@pytest.yield_fixture is deprecated.\n" "Use @pytest.fixture instead; they are the same." ) -MINUS_K_DASH = PytestDeprecationWarning( +MINUS_K_DASH = PytestRemovedIn7Warning( "The `-k '-expr'` syntax to -k is deprecated.\nUse `-k 'not expr'` instead." 
) -MINUS_K_COLON = PytestDeprecationWarning( +MINUS_K_COLON = PytestRemovedIn7Warning( "The `-k 'expr:'` syntax to -k is deprecated.\n" "Please open an issue if you use this and want a replacement." ) -WARNING_CAPTURED_HOOK = PytestDeprecationWarning( +WARNING_CAPTURED_HOOK = PytestRemovedIn7Warning( "The pytest_warning_captured is deprecated and will be removed in a future release.\n" "Please use pytest_warning_recorded instead." ) -FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestDeprecationWarning( +WARNING_CMDLINE_PREPARSE_HOOK = PytestRemovedIn8Warning( + "The pytest_cmdline_preparse hook is deprecated and will be removed in a future release. \n" + "Please use pytest_load_initial_conftests hook instead." +) + +FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestRemovedIn8Warning( "The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; " "use self.session.gethookproxy() and self.session.isinitpath() instead. " ) -STRICT_OPTION = PytestDeprecationWarning( +STRICT_OPTION = PytestRemovedIn8Warning( "The --strict option is deprecated, use --strict-markers instead." ) +# This deprecation is never really meant to be removed. PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") +UNITTEST_SKIP_DURING_COLLECTION = PytestRemovedIn8Warning( + "Raising unittest.SkipTest to skip tests during collection is deprecated. " + "Use pytest.skip() instead." +) + +ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning( + 'pytest now uses argparse. "%default" should be changed to "%(default)s"', +) + +ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + +ARGUMENT_TYPE_STR = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. " + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +WARNS_NONE_ARG = PytestRemovedIn8Warning( + "Passing None has been deprecated.\n" + "See https://docs.pytest.org/en/latest/how-to/capture-warnings.html" + "#additional-use-cases-of-warnings-in-tests" + " for alternatives in common use cases." +) + +KEYWORD_MSG_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "pytest.{func}(msg=...) is now deprecated, use pytest.{func}(reason=...) instead", +) + +INSTANCE_COLLECTOR = PytestRemovedIn8Warning( + "The pytest.Instance collector type is deprecated and is no longer used. " + "See https://docs.pytest.org/en/latest/deprecations.html#the-pytest-instance-collector", +) # You want to make some `__init__` or function "private". # @@ -82,6 +148,8 @@ PRIVATE = PytestDeprecationWarning("A private pytest class or function was used. 
# # All other calls will get the default _ispytest=False and trigger # the warning (possibly error in the future). + + def check_ispytest(ispytest: bool) -> None: if not ispytest: warn(PRIVATE, stacklevel=3) diff --git a/contrib/python/pytest/py3/_pytest/doctest.py b/contrib/python/pytest/py3/_pytest/doctest.py index 64e8f0e0ee..0784f431b8 100644 --- a/contrib/python/pytest/py3/_pytest/doctest.py +++ b/contrib/python/pytest/py3/_pytest/doctest.py @@ -1,12 +1,14 @@ """Discover and run doctests in modules and test files.""" import bdb import inspect +import os import platform import sys import traceback import types import warnings from contextlib import contextmanager +from pathlib import Path from typing import Any from typing import Callable from typing import Dict @@ -21,8 +23,6 @@ from typing import Type from typing import TYPE_CHECKING from typing import Union -import py.path - import pytest from _pytest import outcomes from _pytest._code.code import ExceptionInfo @@ -35,6 +35,7 @@ from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureRequest from _pytest.nodes import Collector from _pytest.outcomes import OutcomeException +from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import import_path from _pytest.python_api import approx from _pytest.warning_types import PytestWarning @@ -119,34 +120,38 @@ def pytest_unconfigure() -> None: def pytest_collect_file( - path: py.path.local, parent: Collector, + file_path: Path, + parent: Collector, ) -> Optional[Union["DoctestModule", "DoctestTextfile"]]: config = parent.config - if path.ext == ".py": - if config.option.doctestmodules and not _is_setup_py(path): - mod: DoctestModule = DoctestModule.from_parent(parent, fspath=path) + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + mod: DoctestModule = DoctestModule.from_parent(parent, path=file_path) return mod - elif _is_doctest(config, path, parent): - txt: DoctestTextfile = DoctestTextfile.from_parent(parent, fspath=path) + elif _is_doctest(config, file_path, parent): + txt: DoctestTextfile = DoctestTextfile.from_parent(parent, path=file_path) return txt return None -def _is_setup_py(path: py.path.local) -> bool: - if path.basename != "setup.py": +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": return False - contents = path.read_binary() + contents = path.read_bytes() return b"setuptools" in contents or b"distutils" in contents -def _is_doctest(config: Config, path: py.path.local, parent) -> bool: - if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): return True globs = config.getoption("doctestglob") or ["test*.txt"] - for glob in globs: - if path.check(fnmatch=glob): - return True - return False + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" class ReprFailDoctest(TerminalRepr): @@ -185,13 +190,15 @@ def _init_runner_class() -> Type["doctest.DocTestRunner"]: optionflags: int = 0, continue_on_failure: bool = True, ) -> None: - doctest.DebugRunner.__init__( - self, checker=checker, verbose=verbose, optionflags=optionflags - ) + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) self.continue_on_failure = continue_on_failure def report_failure( - self, out, test: "doctest.DocTest", 
example: "doctest.Example", got: str, + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + got: str, ) -> None: failure = doctest.DocTestFailure(test, example, got) if self.continue_on_failure: @@ -262,7 +269,7 @@ class DoctestItem(pytest.Item): runner: "doctest.DocTestRunner", dtest: "doctest.DocTest", ): - # incompatible signature due to to imposed limits on sublcass + # incompatible signature due to imposed limits on subclass """The public named constructor.""" return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) @@ -301,13 +308,14 @@ class DoctestItem(pytest.Item): # TODO: Type ignored -- breaks Liskov Substitution. def repr_failure( # type: ignore[override] - self, excinfo: ExceptionInfo[BaseException], + self, + excinfo: ExceptionInfo[BaseException], ) -> Union[str, TerminalRepr]: import doctest failures: Optional[ Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]] - ] = (None) + ] = None if isinstance( excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) ): @@ -315,61 +323,57 @@ class DoctestItem(pytest.Item): elif isinstance(excinfo.value, MultipleDoctestFailures): failures = excinfo.value.failures - if failures is not None: - reprlocation_lines = [] - for failure in failures: - example = failure.example - test = failure.test - filename = test.filename - if test.lineno is None: - lineno = None - else: - lineno = test.lineno + example.lineno + 1 - message = type(failure).__name__ - # TODO: ReprFileLocation doesn't expect a None lineno. - reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] - checker = _get_checker() - report_choice = _get_report_choice( - self.config.getoption("doctestreport") - ) - if lineno is not None: - assert failure.test.docstring is not None - lines = failure.test.docstring.splitlines(False) - # add line numbers to the left of the error message - assert test.lineno is not None - lines = [ - "%03d %s" % (i + test.lineno + 1, x) - for (i, x) in enumerate(lines) - ] - # trim docstring error lines to 10 - lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] - else: - lines = [ - "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" - ] - indent = ">>>" - for line in example.source.splitlines(): - lines.append(f"??? {indent} {line}") - indent = "..." - if isinstance(failure, doctest.DocTestFailure): - lines += checker.output_difference( - example, failure.got, report_choice - ).split("\n") - else: - inner_excinfo = ExceptionInfo(failure.exc_info) - lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] - lines += [ - x.strip("\n") - for x in traceback.format_exception(*failure.exc_info) - ] - reprlocation_lines.append((reprlocation, lines)) - return ReprFailDoctest(reprlocation_lines) - else: + if failures is None: return super().repr_failure(excinfo) - def reportinfo(self): + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. 
+ reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: assert self.dtest is not None - return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name + return self.path, self.dtest.lineno, "[doctest] %s" % self.name def _get_flag_lookup() -> Dict[str, int]: @@ -416,9 +420,9 @@ class DoctestTextfile(pytest.Module): # Inspired by doctest.testfile; ideally we would use it directly, # but it doesn't support passing a custom checker. encoding = self.config.getini("doctest_encoding") - text = self.fspath.read_text(encoding) - filename = str(self.fspath) - name = self.fspath.basename + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name globs = {"__name__": "__main__"} optionflags = get_optionflags(self) @@ -500,15 +504,22 @@ class DoctestModule(pytest.Module): def _find_lineno(self, obj, source_lines): """Doctest code does not take into account `@property`, this - is a hackish way to fix it. + is a hackish way to fix it. https://bugs.python.org/issue17446 - https://bugs.python.org/issue17446 + Wrapped Doctests will need to be unwrapped so the correct + line number is returned. This will be reported upstream. #8796 """ if isinstance(obj, property): obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + # Type ignored because this is a private function. - return doctest.DocTestFinder._find_lineno( # type: ignore - self, obj, source_lines, + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, ) def _find( @@ -519,20 +530,22 @@ class DoctestModule(pytest.Module): with _patch_unwrap_mock_aware(): # Type ignored because this is a private function. 
- doctest.DocTestFinder._find( # type: ignore - self, tests, obj, name, module, source_lines, globs, seen + super()._find( # type:ignore[misc] + tests, obj, name, module, source_lines, globs, seen ) - if self.fspath.basename == "conftest.py": + if self.path.name == "conftest.py": module = self.config.pluginmanager._importconftest( - self.fspath, self.config.getoption("importmode") + self.path, + self.config.getoption("importmode"), + rootpath=self.config.rootpath, ) else: try: - module = import_path(self.fspath) + module = import_path(self.path, root=self.config.rootpath) except ImportError: if self.config.getvalue("doctest_ignore_import_errors"): - pytest.skip("unable to import module %r" % self.fspath) + pytest.skip("unable to import module %r" % self.path) else: raise # Uses internal doctest module parsing mechanism. @@ -603,7 +616,7 @@ def _init_checker_class() -> Type["doctest.OutputChecker"]: ) def check_output(self, want: str, got: str, optionflags: int) -> bool: - if doctest.OutputChecker.check_output(self, want, got, optionflags): + if super().check_output(want, got, optionflags): return True allow_unicode = optionflags & _get_allow_unicode_flag() @@ -627,7 +640,7 @@ def _init_checker_class() -> Type["doctest.OutputChecker"]: if allow_number: got = self._remove_unwanted_precision(want, got) - return doctest.OutputChecker.check_output(self, want, got, optionflags) + return super().check_output(want, got, optionflags) def _remove_unwanted_precision(self, want: str, got: str) -> str: wants = list(self._number_re.finditer(want)) @@ -640,10 +653,7 @@ def _init_checker_class() -> Type["doctest.OutputChecker"]: exponent: Optional[str] = w.group("exponent1") if exponent is None: exponent = w.group("exponent2") - if fraction is None: - precision = 0 - else: - precision = len(fraction) + precision = 0 if fraction is None else len(fraction) if exponent is not None: precision -= int(exponent) if float(w.group()) == approx(float(g.group()), abs=10 ** -precision): diff --git a/contrib/python/pytest/py3/_pytest/faulthandler.py b/contrib/python/pytest/py3/_pytest/faulthandler.py index ff673b5b16..aaee307ff2 100644 --- a/contrib/python/pytest/py3/_pytest/faulthandler.py +++ b/contrib/python/pytest/py3/_pytest/faulthandler.py @@ -8,10 +8,11 @@ import pytest from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.nodes import Item -from _pytest.store import StoreKey +from _pytest.stash import StashKey -fault_handler_stderr_key = StoreKey[TextIO]() +fault_handler_stderr_key = StashKey[TextIO]() +fault_handler_originally_enabled_key = StashKey[bool]() def pytest_addoption(parser: Parser) -> None: @@ -25,92 +26,72 @@ def pytest_addoption(parser: Parser) -> None: def pytest_configure(config: Config) -> None: import faulthandler - if not faulthandler.is_enabled(): - # faulthhandler is not enabled, so install plugin that does the actual work - # of enabling faulthandler before each test executes. - config.pluginmanager.register(FaultHandlerHooks(), "faulthandler-hooks") - else: - # Do not handle dumping to stderr if faulthandler is already enabled, so warn - # users that the option is being ignored. 
- timeout = FaultHandlerHooks.get_timeout_config_value(config) - if timeout > 0: - config.issue_config_time_warning( - pytest.PytestConfigWarning( - "faulthandler module enabled before pytest configuration step, " - "'faulthandler_timeout' option ignored" - ), - stacklevel=2, - ) - - -class FaultHandlerHooks: - """Implements hooks that will actually install fault handler before tests execute, - as well as correctly handle pdb and internal errors.""" - - def pytest_configure(self, config: Config) -> None: - import faulthandler + stderr_fd_copy = os.dup(get_stderr_fileno()) + config.stash[fault_handler_stderr_key] = open(stderr_fd_copy, "w") + config.stash[fault_handler_originally_enabled_key] = faulthandler.is_enabled() + faulthandler.enable(file=config.stash[fault_handler_stderr_key]) + - stderr_fd_copy = os.dup(self._get_stderr_fileno()) - config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w") - faulthandler.enable(file=config._store[fault_handler_stderr_key]) +def pytest_unconfigure(config: Config) -> None: + import faulthandler - def pytest_unconfigure(self, config: Config) -> None: + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_key in config.stash: + config.stash[fault_handler_stderr_key].close() + del config.stash[fault_handler_stderr_key] + if config.stash.get(fault_handler_originally_enabled_key, False): + # Re-enable the faulthandler if it was originally enabled. + faulthandler.enable(file=get_stderr_fileno()) + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, io.UnsupportedOperation): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. + return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +@pytest.hookimpl(hookwrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + timeout = get_timeout_config_value(item.config) + stderr = item.config.stash[fault_handler_stderr_key] + if timeout > 0 and stderr is not None: import faulthandler - faulthandler.disable() - # close our dup file installed during pytest_configure - # re-enable the faulthandler, attaching it to the default sys.stderr - # so we can see crashes after pytest has finished, usually during - # garbage collection during interpreter shutdown - config._store[fault_handler_stderr_key].close() - del config._store[fault_handler_stderr_key] - faulthandler.enable(file=self._get_stderr_fileno()) - - @staticmethod - def _get_stderr_fileno(): + faulthandler.dump_traceback_later(timeout, file=stderr) try: - fileno = sys.stderr.fileno() - # The Twisted Logger will return an invalid file descriptor since it is not backed - # by an FD. So, let's also forward this to the same code path as with pytest-xdist. - if fileno == -1: - raise AttributeError() - return fileno - except (AttributeError, io.UnsupportedOperation): - # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. 
- # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors - # This is potentially dangerous, but the best we can do. - return sys.__stderr__.fileno() - - @staticmethod - def get_timeout_config_value(config): - return float(config.getini("faulthandler_timeout") or 0.0) - - @pytest.hookimpl(hookwrapper=True, trylast=True) - def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]: - timeout = self.get_timeout_config_value(item.config) - stderr = item.config._store[fault_handler_stderr_key] - if timeout > 0 and stderr is not None: - import faulthandler - - faulthandler.dump_traceback_later(timeout, file=stderr) - try: - yield - finally: - faulthandler.cancel_dump_traceback_later() - else: yield + finally: + faulthandler.cancel_dump_traceback_later() + else: + yield - @pytest.hookimpl(tryfirst=True) - def pytest_enter_pdb(self) -> None: - """Cancel any traceback dumping due to timeout before entering pdb.""" - import faulthandler - faulthandler.cancel_dump_traceback_later() +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler - @pytest.hookimpl(tryfirst=True) - def pytest_exception_interact(self) -> None: - """Cancel any traceback dumping due to an interactive exception being - raised.""" - import faulthandler + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler - faulthandler.cancel_dump_traceback_later() + faulthandler.cancel_dump_traceback_later() diff --git a/contrib/python/pytest/py3/_pytest/fixtures.py b/contrib/python/pytest/py3/_pytest/fixtures.py index 273bcafd39..fddff931c5 100644 --- a/contrib/python/pytest/py3/_pytest/fixtures.py +++ b/contrib/python/pytest/py3/_pytest/fixtures.py @@ -5,6 +5,8 @@ import sys import warnings from collections import defaultdict from collections import deque +from contextlib import suppress +from pathlib import Path from types import TracebackType from typing import Any from typing import Callable @@ -15,6 +17,7 @@ from typing import Generic from typing import Iterable from typing import Iterator from typing import List +from typing import MutableMapping from typing import Optional from typing import overload from typing import Sequence @@ -26,7 +29,6 @@ from typing import TypeVar from typing import Union import attr -import py import _pytest from _pytest import nodes @@ -58,34 +60,36 @@ from _pytest.mark.structures import MarkDecorator from _pytest.outcomes import fail from _pytest.outcomes import TEST_OUTCOME from _pytest.pathlib import absolutepath -from _pytest.store import StoreKey +from _pytest.pathlib import bestrelpath +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope +from _pytest.stash import StashKey + if TYPE_CHECKING: from typing import Deque from typing import NoReturn - from typing_extensions import Literal + from _pytest.scope import _ScopeName from _pytest.main import Session from _pytest.python import CallSpec2 from _pytest.python import Function from _pytest.python import Metafunc - _Scope = Literal["session", "package", "module", "class", "function"] - # The value of the fixture -- return/yield of the fixture function (type variable). -_FixtureValue = TypeVar("_FixtureValue") +FixtureValue = TypeVar("FixtureValue") # The type of the fixture function (type variable). 
-_FixtureFunction = TypeVar("_FixtureFunction", bound=Callable[..., object]) +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) # The type of a fixture function (type alias generic in fixture value). _FixtureFunc = Union[ - Callable[..., _FixtureValue], Callable[..., Generator[_FixtureValue, None, None]] + Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]] ] # The type of FixtureDef.cached_result (type alias generic in fixture value). _FixtureCachedResult = Union[ Tuple[ # The result. - _FixtureValue, + FixtureValue, # Cache key. object, None, @@ -100,10 +104,10 @@ _FixtureCachedResult = Union[ ] -@attr.s(frozen=True) -class PseudoFixtureDef(Generic[_FixtureValue]): - cached_result = attr.ib(type="_FixtureCachedResult[_FixtureValue]") - scope = attr.ib(type="_Scope") +@attr.s(frozen=True, auto_attribs=True) +class PseudoFixtureDef(Generic[FixtureValue]): + cached_result: "_FixtureCachedResult[FixtureValue]" + _scope: Scope def pytest_sessionstart(session: "Session") -> None: @@ -126,26 +130,26 @@ def get_scope_package(node, fixturedef: "FixtureDef[object]"): def get_scope_node( - node: nodes.Node, scope: "_Scope" + node: nodes.Node, scope: Scope ) -> Optional[Union[nodes.Item, nodes.Collector]]: import _pytest.python - if scope == "function": + if scope is Scope.Function: return node.getparent(nodes.Item) - elif scope == "class": + elif scope is Scope.Class: return node.getparent(_pytest.python.Class) - elif scope == "module": + elif scope is Scope.Module: return node.getparent(_pytest.python.Module) - elif scope == "package": + elif scope is Scope.Package: return node.getparent(_pytest.python.Package) - elif scope == "session": + elif scope is Scope.Session: return node.getparent(_pytest.main.Session) else: assert_never(scope) # Used for storing artificial fixturedefs for direct parametrization. -name2pseudofixturedef_key = StoreKey[Dict[str, "FixtureDef[Any]"]]() +name2pseudofixturedef_key = StashKey[Dict[str, "FixtureDef[Any]"]]() def add_funcarg_pseudo_fixture_def( @@ -162,7 +166,7 @@ def add_funcarg_pseudo_fixture_def( return # Collect funcargs of all callspecs into a list of values. arg2params: Dict[str, List[object]] = {} - arg2scope: Dict[str, _Scope] = {} + arg2scope: Dict[str, Scope] = {} for callspec in metafunc._calls: for argname, argvalue in callspec.funcargs.items(): assert argname not in callspec.params @@ -171,8 +175,8 @@ def add_funcarg_pseudo_fixture_def( callspec.indices[argname] = len(arg2params_list) arg2params_list.append(argvalue) if argname not in arg2scope: - scopenum = callspec._arg2scopenum.get(argname, scopenum_function) - arg2scope[argname] = scopes[scopenum] + scope = callspec._arg2scope.get(argname, Scope.Function) + arg2scope[argname] = scope callspec.funcargs.clear() # Register artificial FixtureDef's so that later at test execution @@ -185,17 +189,19 @@ def add_funcarg_pseudo_fixture_def( # node related to the scope. scope = arg2scope[argname] node = None - if scope != "function": + if scope is not Scope.Function: node = get_scope_node(collector, scope) if node is None: - assert scope == "class" and isinstance(collector, _pytest.python.Module) + assert scope is Scope.Class and isinstance( + collector, _pytest.python.Module + ) # Use module-level collector for class-scope (for now). 
node = collector if node is None: name2pseudofixturedef = None else: default: Dict[str, FixtureDef[Any]] = {} - name2pseudofixturedef = node._store.setdefault( + name2pseudofixturedef = node.stash.setdefault( name2pseudofixturedef_key, default ) if name2pseudofixturedef is not None and argname in name2pseudofixturedef: @@ -234,10 +240,10 @@ def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: _Key = Tuple[object, ...] -def get_parametrized_fixture_keys(item: nodes.Item, scopenum: int) -> Iterator[_Key]: +def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_Key]: """Return list of keys for all parametrized arguments which match - the specified scope. """ - assert scopenum < scopenum_function # function + the specified scope.""" + assert scope is not Scope.Function try: callspec = item.callspec # type: ignore[attr-defined] except AttributeError: @@ -248,67 +254,71 @@ def get_parametrized_fixture_keys(item: nodes.Item, scopenum: int) -> Iterator[_ # sort this so that different calls to # get_parametrized_fixture_keys will be deterministic. for argname, param_index in sorted(cs.indices.items()): - if cs._arg2scopenum[argname] != scopenum: + if cs._arg2scope[argname] != scope: continue - if scopenum == 0: # session + if scope is Scope.Session: key: _Key = (argname, param_index) - elif scopenum == 1: # package - key = (argname, param_index, item.fspath.dirpath()) - elif scopenum == 2: # module - key = (argname, param_index, item.fspath) - elif scopenum == 3: # class + elif scope is Scope.Package: + key = (argname, param_index, item.path.parent) + elif scope is Scope.Module: + key = (argname, param_index, item.path) + elif scope is Scope.Class: item_cls = item.cls # type: ignore[attr-defined] - key = (argname, param_index, item.fspath, item_cls) + key = (argname, param_index, item.path, item_cls) + else: + assert_never(scope) yield key # Algorithm for sorting on a per-parametrized resource setup basis. -# It is called for scopenum==0 (session) first and performs sorting +# It is called for Session scope first and performs sorting # down to the lower scopes such as to minimize number of "high scope" # setups and teardowns. 
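# Illustrative sketch, not part of the diff above: the hunks in this file
# replace the old integer ``scopenum`` bookkeeping with the ``Scope`` enum
# imported from ``_pytest.scope``. Assuming the ordering implied by the diff
# (Session is the broadest scope, Function the narrowest, and HIGH_SCOPES is
# every scope except Function), the enum behaves roughly like this:

from _pytest.scope import HIGH_SCOPES, Scope

assert Scope("session") is Scope.Session       # built from the user-facing string
assert Scope.Session.value == "session"        # .value recovers that string
assert Scope.Session > Scope.Function          # broader scopes compare greater
assert Scope.Function not in HIGH_SCOPES       # HIGH_SCOPES spans Session..Class
assert Scope.Session.next_lower() is Scope.Package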
def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: - argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]] = {} - items_by_argkey: Dict[int, Dict[_Key, Deque[nodes.Item]]] = {} - for scopenum in range(0, scopenum_function): + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]] = {} + items_by_argkey: Dict[Scope, Dict[_Key, Deque[nodes.Item]]] = {} + for scope in HIGH_SCOPES: d: Dict[nodes.Item, Dict[_Key, None]] = {} - argkeys_cache[scopenum] = d + argkeys_cache[scope] = d item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque) - items_by_argkey[scopenum] = item_d + items_by_argkey[scope] = item_d for item in items: - keys = dict.fromkeys(get_parametrized_fixture_keys(item, scopenum), None) + keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None) if keys: d[item] = keys for key in keys: item_d[key].append(item) items_dict = dict.fromkeys(items, None) - return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, 0)) + return list( + reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session) + ) def fix_cache_order( item: nodes.Item, - argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]], - items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]], + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], ) -> None: - for scopenum in range(0, scopenum_function): - for key in argkeys_cache[scopenum].get(item, []): - items_by_argkey[scopenum][key].appendleft(item) + for scope in HIGH_SCOPES: + for key in argkeys_cache[scope].get(item, []): + items_by_argkey[scope][key].appendleft(item) def reorder_items_atscope( items: Dict[nodes.Item, None], - argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]], - items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]], - scopenum: int, + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], + scope: Scope, ) -> Dict[nodes.Item, None]: - if scopenum >= scopenum_function or len(items) < 3: + if scope is Scope.Function or len(items) < 3: return items ignore: Set[Optional[_Key]] = set() items_deque = deque(items) items_done: Dict[nodes.Item, None] = {} - scoped_items_by_argkey = items_by_argkey[scopenum] - scoped_argkeys_cache = argkeys_cache[scopenum] + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_cache = argkeys_cache[scope] while items_deque: no_argkey_group: Dict[nodes.Item, None] = {} slicing_argkey = None @@ -334,7 +344,7 @@ def reorder_items_atscope( break if no_argkey_group: no_argkey_group = reorder_items_atscope( - no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + no_argkey_group, argkeys_cache, items_by_argkey, scope.next_lower() ) for item in no_argkey_group: items_done[item] = None @@ -369,12 +379,10 @@ def _fill_fixtures_impl(function: "Function") -> None: fi = fm.getfixtureinfo(function.parent, function.obj, None) function._fixtureinfo = fi request = function._request = FixtureRequest(function, _ispytest=True) + fm.session._setupstate.setup(function) request._fillfixtures() # Prune out funcargs for jstests. 
- newfuncargs = {} - for name in fi.argnames: - newfuncargs[name] = function.funcargs[name] - function.funcargs = newfuncargs + function.funcargs = {name: function.funcargs[name] for name in fi.argnames} else: request._fillfixtures() @@ -383,16 +391,16 @@ def get_direct_param_fixture_func(request): return request.param -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class FuncFixtureInfo: # Original function argument names. - argnames = attr.ib(type=Tuple[str, ...]) + argnames: Tuple[str, ...] # Argnames that function immediately requires. These include argnames + # fixture names specified via usefixtures and via autouse=True in fixture # definitions. - initialnames = attr.ib(type=Tuple[str, ...]) - names_closure = attr.ib(type=List[str]) - name2fixturedefs = attr.ib(type=Dict[str, Sequence["FixtureDef[Any]"]]) + initialnames: Tuple[str, ...] + names_closure: List[str] + name2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]] def prune_dependency_tree(self) -> None: """Recompute names_closure from initialnames and name2fixturedefs. @@ -435,13 +443,17 @@ class FixtureRequest: self._pyfuncitem = pyfuncitem #: Fixture for which this request is being performed. self.fixturename: Optional[str] = None - #: Scope string, one of "function", "class", "module", "session". - self.scope: _Scope = "function" + self._scope = Scope.Function self._fixture_defs: Dict[str, FixtureDef[Any]] = {} fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() self._arg2index: Dict[str, int] = {} - self._fixturemanager: FixtureManager = (pyfuncitem.session._fixturemanager) + self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager + + @property + def scope(self) -> "_ScopeName": + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value @property def fixturenames(self) -> List[str]: @@ -453,7 +465,7 @@ class FixtureRequest: @property def node(self): """Underlying collection node (depends on current request scope).""" - return self._getscopeitem(self.scope) + return self._getscopeitem(self._scope) def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]": fixturedefs = self._arg2fixturedefs.get(argname, None) @@ -515,17 +527,17 @@ class FixtureRequest: return self._pyfuncitem.getparent(_pytest.python.Module).obj @property - def fspath(self) -> py.path.local: - """The file system path of the test module which collected this test.""" + def path(self) -> Path: if self.scope not in ("function", "class", "module", "package"): - raise AttributeError(f"module not available in {self.scope}-scoped context") + raise AttributeError(f"path not available in {self.scope}-scoped context") # TODO: Remove ignore once _pyfuncitem is properly typed. 
- return self._pyfuncitem.fspath # type: ignore + return self._pyfuncitem.path # type: ignore @property - def keywords(self): + def keywords(self) -> MutableMapping[str, Any]: """Keywords/markers dictionary for the underlying node.""" - return self.node.keywords + node: nodes.Node = self.node + return node.keywords @property def session(self) -> "Session": @@ -539,10 +551,8 @@ class FixtureRequest: self._addfinalizer(finalizer, scope=self.scope) def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None: - colitem = self._getscopeitem(scope) - self._pyfuncitem.session._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem - ) + node = self._getscopeitem(scope) + node.addfinalizer(finalizer) def applymarker(self, marker: Union[str, MarkDecorator]) -> None: """Apply a marker to a single test function invocation. @@ -551,7 +561,7 @@ class FixtureRequest: on all function invocations. :param marker: - A :py:class:`_pytest.mark.MarkDecorator` object created by a call + A :class:`pytest.MarkDecorator` object created by a call to ``pytest.mark.NAME(...)``. """ self.node.add_marker(marker) @@ -593,8 +603,7 @@ class FixtureRequest: except FixtureLookupError: if argname == "request": cached_result = (self, [0], None) - scope: _Scope = "function" - return PseudoFixtureDef(cached_result, scope) + return PseudoFixtureDef(cached_result, Scope.Function) raise # Remove indent to prevent the python3 exception # from leaking into the call. @@ -605,14 +614,11 @@ class FixtureRequest: def _get_fixturestack(self) -> List["FixtureDef[Any]"]: current = self values: List[FixtureDef[Any]] = [] - while 1: - fixturedef = getattr(current, "_fixturedef", None) - if fixturedef is None: - values.reverse() - return values - values.append(fixturedef) - assert isinstance(current, SubRequest) + while isinstance(current, SubRequest): + values.append(current._fixturedef) # type: ignore[has-type] current = current._parent_request + values.reverse() + return values def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None: """Create a SubRequest based on "self" and call the execute method @@ -626,7 +632,7 @@ class FixtureRequest: # (latter managed by fixturedef) argname = fixturedef.argname funcitem = self._pyfuncitem - scope = fixturedef.scope + scope = fixturedef._scope try: param = funcitem.callspec.getparam(argname) except (AttributeError, ValueError): @@ -648,12 +654,13 @@ class FixtureRequest: if has_params: frame = inspect.stack()[3] frameinfo = inspect.getframeinfo(frame[0]) - source_path = py.path.local(frameinfo.filename) + source_path = absolutepath(frameinfo.filename) source_lineno = frameinfo.lineno - rel_source_path = source_path.relto(funcitem.config.rootdir) - if rel_source_path: - source_path_str = rel_source_path - else: + try: + source_path_str = str( + source_path.relative_to(funcitem.config.rootpath) + ) + except ValueError: source_path_str = str(source_path) msg = ( "The requested fixture has no parameter defined for test:\n" @@ -662,7 +669,7 @@ class FixtureRequest: "\n\nRequested here:\n{}:{}".format( funcitem.nodeid, fixturedef.argname, - getlocation(fixturedef.func, funcitem.config.rootdir), + getlocation(fixturedef.func, funcitem.config.rootpath), source_path_str, source_lineno, ) @@ -672,16 +679,15 @@ class FixtureRequest: param_index = funcitem.callspec.indices[argname] # If a parametrize invocation set a scope it will override # the static scope defined with the fixture function. 
- paramscopenum = funcitem.callspec._arg2scopenum.get(argname) - if paramscopenum is not None: - scope = scopes[paramscopenum] + with suppress(KeyError): + scope = funcitem.callspec._arg2scope[argname] subrequest = SubRequest( self, scope, param, param_index, fixturedef, _ispytest=True ) # Check if a higher-level scoped fixture accesses a lower level one. - subrequest._check_scope(argname, self.scope, scope) + subrequest._check_scope(argname, self._scope, scope) try: # Call the fixture function. fixturedef.execute(request=subrequest) @@ -692,23 +698,23 @@ class FixtureRequest: self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest" ) -> None: # If fixture function failed it might have registered finalizers. - self.session._setupstate.addfinalizer( - functools.partial(fixturedef.finish, request=subrequest), subrequest.node - ) + subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest)) def _check_scope( - self, argname: str, invoking_scope: "_Scope", requested_scope: "_Scope", + self, + argname: str, + invoking_scope: Scope, + requested_scope: Scope, ) -> None: if argname == "request": return - if scopemismatch(invoking_scope, requested_scope): + if invoking_scope > requested_scope: # Try to report something helpful. - lines = self._factorytraceback() + text = "\n".join(self._factorytraceback()) fail( - "ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" - % ((requested_scope, argname, invoking_scope, "\n".join(lines))), + f"ScopeMismatch: You tried to access the {requested_scope.value} scoped " + f"fixture {argname} with a {invoking_scope.value} scoped request object, " + f"involved factories:\n{text}", pytrace=False, ) @@ -717,22 +723,30 @@ class FixtureRequest: for fixturedef in self._get_fixturestack(): factory = fixturedef.func fs, lineno = getfslineno(factory) - p = self._pyfuncitem.session.fspath.bestrelpath(fs) + if isinstance(fs, Path): + session: Session = self._pyfuncitem.session + p = bestrelpath(session.path, fs) + else: + p = fs args = _format_args(factory) lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args)) return lines - def _getscopeitem(self, scope: "_Scope") -> Union[nodes.Item, nodes.Collector]: - if scope == "function": + def _getscopeitem( + self, scope: Union[Scope, "_ScopeName"] + ) -> Union[nodes.Item, nodes.Collector]: + if isinstance(scope, str): + scope = Scope(scope) + if scope is Scope.Function: # This might also be a non-function Item despite its attribute name. node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem - elif scope == "package": + elif scope is Scope.Package: # FIXME: _fixturedef is not defined on FixtureRequest (this class), # but on FixtureRequest (a subclass). node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined] else: node = get_scope_node(self._pyfuncitem, scope) - if node is None and scope == "class": + if node is None and scope is Scope.Class: # Fallback to function item itself. 
node = self._pyfuncitem assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( @@ -751,8 +765,8 @@ class SubRequest(FixtureRequest): def __init__( self, request: "FixtureRequest", - scope: "_Scope", - param, + scope: Scope, + param: Any, param_index: int, fixturedef: "FixtureDef[object]", *, @@ -764,7 +778,7 @@ class SubRequest(FixtureRequest): if param is not NOTSET: self.param = param self.param_index = param_index - self.scope = scope + self._scope = scope self._fixturedef = fixturedef self._pyfuncitem = request._pyfuncitem self._fixture_defs = request._fixture_defs @@ -793,29 +807,6 @@ class SubRequest(FixtureRequest): super()._schedule_finalizers(fixturedef, subrequest) -scopes: List["_Scope"] = ["session", "package", "module", "class", "function"] -scopenum_function = scopes.index("function") - - -def scopemismatch(currentscope: "_Scope", newscope: "_Scope") -> bool: - return scopes.index(newscope) > scopes.index(currentscope) - - -def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int: - """Look up the index of ``scope`` and raise a descriptive value error - if not defined.""" - strscopes: Sequence[str] = scopes - try: - return strscopes.index(scope) - except ValueError: - fail( - "{} {}got an unexpected scope value '{}'".format( - descr, f"from {where} " if where else "", scope - ), - pytrace=False, - ) - - @final class FixtureLookupError(LookupError): """Could not return a requested fixture (missing or invalid).""" @@ -846,7 +837,7 @@ class FixtureLookupError(LookupError): error_msg = "file %s, line %s: source code not available" addline(error_msg % (fspath, lineno + 1)) else: - addline("file {}, line {}".format(fspath, lineno + 1)) + addline(f"file {fspath}, line {lineno + 1}") for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) @@ -876,7 +867,7 @@ class FixtureLookupError(LookupError): class FixtureLookupErrorRepr(TerminalRepr): def __init__( self, - filename: Union[str, py.path.local], + filename: Union[str, "os.PathLike[str]"], firstlineno: int, tblines: Sequence[str], errorstring: str, @@ -895,30 +886,31 @@ class FixtureLookupErrorRepr(TerminalRepr): lines = self.errorstring.split("\n") if lines: tw.line( - "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", red=True, ) for line in lines[1:]: tw.line( - f"{FormattedExcinfo.flow_marker} {line.strip()}", red=True, + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, ) tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) + tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1)) def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn": fs, lineno = getfslineno(fixturefunc) - location = "{}:{}".format(fs, lineno + 1) + location = f"{fs}:{lineno + 1}" source = _pytest._code.Source(fixturefunc) fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) def call_fixture_func( - fixturefunc: "_FixtureFunc[_FixtureValue]", request: FixtureRequest, kwargs -) -> _FixtureValue: + fixturefunc: "_FixtureFunc[FixtureValue]", request: FixtureRequest, kwargs +) -> FixtureValue: if is_generator(fixturefunc): fixturefunc = cast( - Callable[..., Generator[_FixtureValue, None, None]], fixturefunc + Callable[..., Generator[FixtureValue, None, None]], fixturefunc ) generator = fixturefunc(**kwargs) try: @@ -928,7 +920,7 @@ def call_fixture_func( finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) request.addfinalizer(finalizer) else: 
- fixturefunc = cast(Callable[..., _FixtureValue], fixturefunc) + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) fixture_result = fixturefunc(**kwargs) return fixture_result @@ -946,10 +938,10 @@ def _teardown_yield_fixture(fixturefunc, it) -> None: def _eval_scope_callable( - scope_callable: "Callable[[str, Config], _Scope]", + scope_callable: "Callable[[str, Config], _ScopeName]", fixture_name: str, config: Config, -) -> "_Scope": +) -> "_ScopeName": try: # Type ignored because there is no typing mechanism to specify # keyword arguments, currently. @@ -971,7 +963,7 @@ def _eval_scope_callable( @final -class FixtureDef(Generic[_FixtureValue]): +class FixtureDef(Generic[FixtureValue]): """A container for a factory definition.""" def __init__( @@ -979,8 +971,8 @@ class FixtureDef(Generic[_FixtureValue]): fixturemanager: "FixtureManager", baseid: Optional[str], argname: str, - func: "_FixtureFunc[_FixtureValue]", - scope: "Union[_Scope, Callable[[str, Config], _Scope]]", + func: "_FixtureFunc[FixtureValue]", + scope: Union[Scope, "_ScopeName", Callable[[str, Config], "_ScopeName"], None], params: Optional[Sequence[object]], unittest: bool = False, ids: Optional[ @@ -995,26 +987,30 @@ class FixtureDef(Generic[_FixtureValue]): self.has_location = baseid is not None self.func = func self.argname = argname - if callable(scope): - scope_ = _eval_scope_callable(scope, argname, fixturemanager.config) - else: - scope_ = scope - self.scopenum = scope2index( - # TODO: Check if the `or` here is really necessary. - scope_ or "function", # type: ignore[unreachable] - descr=f"Fixture '{func.__name__}'", - where=baseid, - ) - self.scope = scope_ + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, fixturemanager.config) + + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope = scope self.params: Optional[Sequence[object]] = params self.argnames: Tuple[str, ...] = getfuncargnames( func, name=argname, is_method=unittest ) self.unittest = unittest self.ids = ids - self.cached_result: Optional[_FixtureCachedResult[_FixtureValue]] = None + self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None self._finalizers: List[Callable[[], object]] = [] + @property + def scope(self) -> "_ScopeName": + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + def addfinalizer(self, finalizer: Callable[[], object]) -> None: self._finalizers.append(finalizer) @@ -1033,7 +1029,7 @@ class FixtureDef(Generic[_FixtureValue]): if exc: raise exc finally: - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook = self._fixturemanager.session.gethookproxy(request.node.path) hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) # Even if finalization fails, we invalidate the cached fixture # value and remove all finalizers because they may be bound methods @@ -1041,7 +1037,7 @@ class FixtureDef(Generic[_FixtureValue]): self.cached_result = None self._finalizers = [] - def execute(self, request: SubRequest) -> _FixtureValue: + def execute(self, request: SubRequest) -> FixtureValue: # Get required arguments and register our own finish() # with their finalization. 
for argname in self.argnames: @@ -1068,7 +1064,7 @@ class FixtureDef(Generic[_FixtureValue]): self.finish(request) assert self.cached_result is None - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook = self._fixturemanager.session.gethookproxy(request.node.path) result = hook.pytest_fixture_setup(fixturedef=self, request=request) return result @@ -1082,8 +1078,8 @@ class FixtureDef(Generic[_FixtureValue]): def resolve_fixture_function( - fixturedef: FixtureDef[_FixtureValue], request: FixtureRequest -) -> "_FixtureFunc[_FixtureValue]": + fixturedef: FixtureDef[FixtureValue], request: FixtureRequest +) -> "_FixtureFunc[FixtureValue]": """Get the actual callable that can be called to obtain the fixture value, dealing with unittest-specific instances and bound methods.""" fixturefunc = fixturedef.func @@ -1109,15 +1105,15 @@ def resolve_fixture_function( def pytest_fixture_setup( - fixturedef: FixtureDef[_FixtureValue], request: SubRequest -) -> _FixtureValue: + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: """Execution of fixture setup.""" kwargs = {} for argname in fixturedef.argnames: fixdef = request._get_active_fixturedef(argname) assert fixdef.cached_result is not None result, arg_cache_key, exc = fixdef.cached_result - request._check_scope(argname, request.scope, fixdef.scope) + request._check_scope(argname, request._scope, fixdef._scope) kwargs[argname] = result fixturefunc = resolve_fixture_function(fixturedef, request) @@ -1160,14 +1156,15 @@ def _params_converter( def wrap_function_to_error_out_if_called_directly( - function: _FixtureFunction, fixture_marker: "FixtureFunctionMarker", -) -> _FixtureFunction: + function: FixtureFunction, + fixture_marker: "FixtureFunctionMarker", +) -> FixtureFunction: """Wrap the given fixture function so we can raise an error about it being called directly, instead of used as an argument in a test function.""" message = ( 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' "but are created automatically when test functions request them as parameters.\n" - "See https://docs.pytest.org/en/stable/fixture.html for more information about fixtures, and\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." ).format(name=fixture_marker.name or function.__name__) @@ -1179,26 +1176,25 @@ def wrap_function_to_error_out_if_called_directly( # further than this point and lose useful wrappings like @mock.patch (#3774). 
result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] - return cast(_FixtureFunction, result) + return cast(FixtureFunction, result) @final -@attr.s(frozen=True) +@attr.s(frozen=True, auto_attribs=True) class FixtureFunctionMarker: - scope = attr.ib(type="Union[_Scope, Callable[[str, Config], _Scope]]") - params = attr.ib(type=Optional[Tuple[object, ...]], converter=_params_converter) - autouse = attr.ib(type=bool, default=False) - ids = attr.ib( - type=Union[ - Tuple[Union[None, str, float, int, bool], ...], - Callable[[Any], Optional[object]], - ], + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" + params: Optional[Tuple[object, ...]] = attr.ib(converter=_params_converter) + autouse: bool = False + ids: Union[ + Tuple[Union[None, str, float, int, bool], ...], + Callable[[Any], Optional[object]], + ] = attr.ib( default=None, converter=_ensure_immutable_ids, ) - name = attr.ib(type=Optional[str], default=None) + name: Optional[str] = None - def __call__(self, function: _FixtureFunction) -> _FixtureFunction: + def __call__(self, function: FixtureFunction) -> FixtureFunction: if inspect.isclass(function): raise ValueError("class fixtures not supported (maybe in the future)") @@ -1226,9 +1222,9 @@ class FixtureFunctionMarker: @overload def fixture( - fixture_function: _FixtureFunction, + fixture_function: FixtureFunction, *, - scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ..., + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., params: Optional[Iterable[object]] = ..., autouse: bool = ..., ids: Optional[ @@ -1238,7 +1234,7 @@ def fixture( ] ] = ..., name: Optional[str] = ..., -) -> _FixtureFunction: +) -> FixtureFunction: ... @@ -1246,7 +1242,7 @@ def fixture( def fixture( fixture_function: None = ..., *, - scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ..., + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., params: Optional[Iterable[object]] = ..., autouse: bool = ..., ids: Optional[ @@ -1261,9 +1257,9 @@ def fixture( def fixture( - fixture_function: Optional[_FixtureFunction] = None, + fixture_function: Optional[FixtureFunction] = None, *, - scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = "function", + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = "function", params: Optional[Iterable[object]] = None, autouse: bool = False, ids: Optional[ @@ -1273,7 +1269,7 @@ def fixture( ] ] = None, name: Optional[str] = None, -) -> Union[FixtureFunctionMarker, _FixtureFunction]: +) -> Union[FixtureFunctionMarker, FixtureFunction]: """Decorator to mark a fixture factory function. This decorator can be used, with or without parameters, to define a @@ -1325,7 +1321,11 @@ def fixture( ``@pytest.fixture(name='<fixturename>')``. """ fixture_marker = FixtureFunctionMarker( - scope=scope, params=params, autouse=autouse, ids=ids, name=name, + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, ) # Direct decoration. @@ -1363,7 +1363,8 @@ def yield_fixture( @fixture(scope="session") def pytestconfig(request: FixtureRequest) -> Config: - """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. + """Session-scoped fixture that returns the session's :class:`pytest.Config` + object. 
Example:: @@ -1537,15 +1538,15 @@ class FixtureManager: arg2fixturedefs[argname] = fixturedefs merge(fixturedefs[-1].argnames) - def sort_by_scope(arg_name: str) -> int: + def sort_by_scope(arg_name: str) -> Scope: try: fixturedefs = arg2fixturedefs[arg_name] except KeyError: - return scopes.index("function") + return Scope.Function else: - return fixturedefs[-1].scopenum + return fixturedefs[-1]._scope - fixturenames_closure.sort(key=sort_by_scope) + fixturenames_closure.sort(key=sort_by_scope, reverse=True) return initialnames, fixturenames_closure, arg2fixturedefs def pytest_generate_tests(self, metafunc: "Metafunc") -> None: @@ -1611,6 +1612,11 @@ class FixtureManager: self._holderobjseen.add(holderobj) autousenames = [] for name in dir(holderobj): + # ugly workaround for one of the fspath deprecated property of node + # todo: safely generalize + if isinstance(holderobj, nodes.Node) and name == "fspath": + continue + # The attribute can be an arbitrary descriptor, so the attribute # access below can raise. safe_getatt() ignores such exceptions. obj = safe_getattr(holderobj, name, None) diff --git a/contrib/python/pytest/py3/_pytest/freeze_support.py b/contrib/python/pytest/py3/_pytest/freeze_support.py index 8b93ed5f7f..9f8ea231fe 100644 --- a/contrib/python/pytest/py3/_pytest/freeze_support.py +++ b/contrib/python/pytest/py3/_pytest/freeze_support.py @@ -9,16 +9,15 @@ from typing import Union def freeze_includes() -> List[str]: """Return a list of module names used by pytest that should be included by cx_freeze.""" - import py import _pytest - result = list(_iter_all_modules(py)) - result += list(_iter_all_modules(_pytest)) + result = list(_iter_all_modules(_pytest)) return result def _iter_all_modules( - package: Union[str, types.ModuleType], prefix: str = "", + package: Union[str, types.ModuleType], + prefix: str = "", ) -> Iterator[str]: """Iterate over the names of all modules that can be found in the given package, recursively. diff --git a/contrib/python/pytest/py3/_pytest/helpconfig.py b/contrib/python/pytest/py3/_pytest/helpconfig.py index 4384d07b26..aca2cd391e 100644 --- a/contrib/python/pytest/py3/_pytest/helpconfig.py +++ b/contrib/python/pytest/py3/_pytest/helpconfig.py @@ -6,8 +6,6 @@ from typing import List from typing import Optional from typing import Union -import py - import pytest from _pytest.config import Config from _pytest.config import ExitCode @@ -51,7 +49,7 @@ def pytest_addoption(parser: Parser) -> None: action="count", default=0, dest="version", - help="display pytest version and information about plugins." + help="display pytest version and information about plugins. " "When given twice, also display information about plugins.", ) group._addoption( @@ -80,10 +78,14 @@ def pytest_addoption(parser: Parser) -> None: ) group.addoption( "--debug", - action="store_true", + action="store", + nargs="?", + const="pytestdebug.log", dest="debug", - default=False, - help="store internal tracing debug information in 'pytestdebug.log'.", + metavar="DEBUG_FILE_NAME", + help="store internal tracing debug information in this log file.\n" + "This file is opened with 'w' and truncated as a result, care advised.\n" + "Defaults to 'pytestdebug.log'.", ) group._addoption( "-o", @@ -98,15 +100,16 @@ def pytest_addoption(parser: Parser) -> None: def pytest_cmdline_parse(): outcome = yield config: Config = outcome.get_result() + if config.option.debug: - path = os.path.abspath("pytestdebug.log") + # --debug | --debug <file.log> was provided. 
+ path = config.option.debug debugfile = open(path, "w") debugfile.write( - "versions pytest-%s, py-%s, " + "versions pytest-%s, " "python-%s\ncwd=%s\nargs=%s\n\n" % ( pytest.__version__, - py.__version__, ".".join(map(str, sys.version_info)), os.getcwd(), config.invocation_params.args, @@ -114,11 +117,11 @@ def pytest_cmdline_parse(): ) config.trace.root.setwriter(debugfile.write) undo_tracing = config.pluginmanager.enable_tracing() - sys.stderr.write("writing pytestdebug information to %s\n" % path) + sys.stderr.write("writing pytest debug information to %s\n" % path) def unset_tracing() -> None: debugfile.close() - sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) + sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name) config.trace.root.setwriter(None) undo_tracing() @@ -127,7 +130,7 @@ def pytest_cmdline_parse(): def showversion(config: Config) -> None: if config.option.version > 1: - sys.stderr.write( + sys.stdout.write( "This is pytest version {}, imported from {}\n".format( pytest.__version__, pytest.__file__ ) @@ -135,9 +138,9 @@ def showversion(config: Config) -> None: plugininfo = getpluginversioninfo(config) if plugininfo: for line in plugininfo: - sys.stderr.write(line + "\n") + sys.stdout.write(line + "\n") else: - sys.stderr.write(f"pytest {pytest.__version__}\n") + sys.stdout.write(f"pytest {pytest.__version__}\n") def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: @@ -243,7 +246,7 @@ def getpluginversioninfo(config: Config) -> List[str]: def pytest_report_header(config: Config) -> List[str]: lines = [] if config.option.debug or config.option.traceconfig: - lines.append(f"using: pytest-{pytest.__version__} pylib-{py.__version__}") + lines.append(f"using: pytest-{pytest.__version__}") verinfo = getpluginversioninfo(config) if verinfo: diff --git a/contrib/python/pytest/py3/_pytest/hookspec.py b/contrib/python/pytest/py3/_pytest/hookspec.py index e499b742c7..79251315d8 100644 --- a/contrib/python/pytest/py3/_pytest/hookspec.py +++ b/contrib/python/pytest/py3/_pytest/hookspec.py @@ -1,5 +1,6 @@ """Hook specifications for pytest plugins which are invoked by pytest itself and by builtin plugins.""" +from pathlib import Path from typing import Any from typing import Dict from typing import List @@ -10,10 +11,10 @@ from typing import Tuple from typing import TYPE_CHECKING from typing import Union -import py.path from pluggy import HookspecMarker from _pytest.deprecated import WARNING_CAPTURED_HOOK +from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK if TYPE_CHECKING: import pdb @@ -41,6 +42,7 @@ if TYPE_CHECKING: from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.terminal import TerminalReporter + from _pytest.compat import LEGACY_PATH hookspec = HookspecMarker("pytest") @@ -55,7 +57,7 @@ def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None: """Called at plugin registration time to allow adding new hooks via a call to ``pluginmanager.add_hookspecs(module_or_class, prefix)``. - :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager. + :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager. .. note:: This hook is incompatible with ``hookwrapper=True``. @@ -69,7 +71,7 @@ def pytest_plugin_registered( """A new pytest plugin got registered. :param plugin: The plugin module or instance. - :param _pytest.config.PytestPluginManager manager: pytest plugin manager. 
+ :param pytest.PytestPluginManager manager: pytest plugin manager. .. note:: This hook is incompatible with ``hookwrapper=True``. @@ -87,24 +89,24 @@ def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> files situated at the tests root directory due to how pytest :ref:`discovers plugins during startup <pluginorder>`. - :param _pytest.config.argparsing.Parser parser: + :param pytest.Parser parser: To add command line options, call - :py:func:`parser.addoption(...) <_pytest.config.argparsing.Parser.addoption>`. + :py:func:`parser.addoption(...) <pytest.Parser.addoption>`. To add ini-file values call :py:func:`parser.addini(...) - <_pytest.config.argparsing.Parser.addini>`. + <pytest.Parser.addini>`. - :param _pytest.config.PytestPluginManager pluginmanager: - pytest plugin manager, which can be used to install :py:func:`hookspec`'s + :param pytest.PytestPluginManager pluginmanager: + The pytest plugin manager, which can be used to install :py:func:`hookspec`'s or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks to change how command line options are added. Options can later be accessed through the - :py:class:`config <_pytest.config.Config>` object, respectively: + :py:class:`config <pytest.Config>` object, respectively: - - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + - :py:func:`config.getoption(name) <pytest.Config.getoption>` to retrieve the value of a command line option. - - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + - :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve a value read from an ini-style file. The config object is passed around on many internal objects via the ``.config`` @@ -128,7 +130,7 @@ def pytest_configure(config: "Config") -> None: .. note:: This hook is incompatible with ``hookwrapper=True``. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. """ @@ -151,21 +153,22 @@ def pytest_cmdline_parse( ``plugins`` arg when using `pytest.main`_ to perform an in-process test run. - :param _pytest.config.PytestPluginManager pluginmanager: Pytest plugin manager. + :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager. :param List[str] args: List of arguments passed on the command line. """ +@hookspec(warn_on_impl=WARNING_CMDLINE_PREPARSE_HOOK) def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None: """(**Deprecated**) modify command line arguments before option parsing. This hook is considered deprecated and will be removed in a future pytest version. Consider - using :func:`pytest_load_initial_conftests` instead. + using :hook:`pytest_load_initial_conftests` instead. .. note:: This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :param List[str] args: Arguments passed on the command line. """ @@ -175,12 +178,9 @@ def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]: """Called for performing the main command line action. The default implementation will invoke the configure hooks and runtest_mainloop. - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - Stops at first non-None result, see :ref:`firstresult`. - :param _pytest.config.Config config: The pytest config object. 
+ :param pytest.Config config: The pytest config object. """ @@ -193,9 +193,9 @@ def pytest_load_initial_conftests( .. note:: This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - :param _pytest.config.Config early_config: The pytest config object. + :param pytest.Config early_config: The pytest config object. :param List[str] args: Arguments passed on the command line. - :param _pytest.config.argparsing.Parser parser: To add command line options. + :param pytest.Parser parser: To add command line options. """ @@ -248,7 +248,7 @@ def pytest_collection_modifyitems( the items in-place. :param pytest.Session session: The pytest session object. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :param List[pytest.Item] items: List of item objects. """ @@ -261,7 +261,9 @@ def pytest_collection_finish(session: "Session") -> None: @hookspec(firstresult=True) -def pytest_ignore_collect(path: py.path.local, config: "Config") -> Optional[bool]: +def pytest_ignore_collect( + collection_path: Path, path: "LEGACY_PATH", config: "Config" +) -> Optional[bool]: """Return True to prevent considering this path for collection. This hook is consulted for all files and directories prior to calling @@ -269,19 +271,31 @@ def pytest_ignore_collect(path: py.path.local, config: "Config") -> Optional[boo Stops at first non-None result, see :ref:`firstresult`. - :param py.path.local path: The path to analyze. - :param _pytest.config.Config config: The pytest config object. + :param pathlib.Path collection_path : The path to analyze. + :param LEGACY_PATH path: The path to analyze (deprecated). + :param pytest.Config config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. """ def pytest_collect_file( - path: py.path.local, parent: "Collector" + file_path: Path, path: "LEGACY_PATH", parent: "Collector" ) -> "Optional[Collector]": """Create a Collector for the given path, or None if not relevant. The new node needs to have the specified ``parent`` as a parent. - :param py.path.local path: The path to collect. + :param pathlib.Path file_path: The path to analyze. + :param LEGACY_PATH path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. """ @@ -309,7 +323,8 @@ def pytest_deselected(items: Sequence["Item"]) -> None: @hookspec(firstresult=True) def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]": - """Perform ``collector.collect()`` and return a CollectReport. + """Perform :func:`collector.collect() <pytest.Collector.collect>` and return + a :class:`~pytest.CollectReport`. Stops at first non-None result, see :ref:`firstresult`. """ @@ -321,7 +336,9 @@ def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectRepor @hookspec(firstresult=True) -def pytest_pycollect_makemodule(path: py.path.local, parent) -> Optional["Module"]: +def pytest_pycollect_makemodule( + module_path: Path, path: "LEGACY_PATH", parent +) -> Optional["Module"]: """Return a Module collector or None for the given path. This hook will be called for each matching test module path. 
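# Illustrative sketch, not part of the diff above: the hookspec changes in this
# file add pathlib.Path parameters (collection_path, file_path, module_path)
# alongside the now-deprecated py.path ones. A conftest.py implementing the new
# signatures might look like the following; the "build" directory check, the
# ".yml" suffix, and the YamlFile collector name are assumed examples only.

from pathlib import Path
from typing import Optional


def pytest_ignore_collect(collection_path: Path) -> Optional[bool]:
    # Ignore anything under a top-level "build" directory; returning None
    # leaves the decision to other plugins.
    return True if "build" in collection_path.parts else None


def pytest_collect_file(file_path: Path, parent):
    # Only .yml files are of interest here; a real plugin would return a
    # custom Collector built via from_parent().
    if file_path.suffix == ".yml":
        pass  # e.g. return YamlFile.from_parent(parent, path=file_path)
    return None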
@@ -330,7 +347,14 @@ def pytest_pycollect_makemodule(path: py.path.local, parent) -> Optional["Module Stops at first non-None result, see :ref:`firstresult`. - :param py.path.local path: The path of module to collect. + :param pathlib.Path module_path: The path of the module to collect. + :param LEGACY_PATH path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. """ @@ -368,7 +392,7 @@ def pytest_make_parametrize_id( Stops at first non-None result, see :ref:`firstresult`. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :param val: The parametrized value. :param str argname: The automatic parameter name produced by pytest. """ @@ -443,7 +467,7 @@ def pytest_runtest_logstart( ) -> None: """Called at the start of running the runtest protocol for a single item. - See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. :param str nodeid: Full node ID of the item. :param location: A tuple of ``(filename, lineno, testname)``. @@ -455,7 +479,7 @@ def pytest_runtest_logfinish( ) -> None: """Called at the end of running the runtest protocol for a single item. - See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. :param str nodeid: Full node ID of the item. :param location: A tuple of ``(filename, lineno, testname)``. @@ -489,9 +513,9 @@ def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: :param nextitem: The scheduled-to-be-next test item (None if no further test item is - scheduled). This argument can be used to perform exact teardowns, - i.e. calling just enough finalizers so that nextitem only needs to - call setup-functions. + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. """ @@ -499,28 +523,29 @@ def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: def pytest_runtest_makereport( item: "Item", call: "CallInfo[None]" ) -> Optional["TestReport"]: - """Called to create a :py:class:`_pytest.reports.TestReport` for each of + """Called to create a :class:`~pytest.TestReport` for each of the setup, call and teardown runtest phases of a test item. - See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. - :param CallInfo[None] call: The ``CallInfo`` for the phase. + :param call: The :class:`~pytest.CallInfo` for the phase. Stops at first non-None result, see :ref:`firstresult`. """ def pytest_runtest_logreport(report: "TestReport") -> None: - """Process the :py:class:`_pytest.reports.TestReport` produced for each + """Process the :class:`~pytest.TestReport` produced for each of the setup, call and teardown runtest phases of an item. - See :func:`pytest_runtest_protocol` for a description of the runtest protocol. + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. 
""" @hookspec(firstresult=True) def pytest_report_to_serializable( - config: "Config", report: Union["CollectReport", "TestReport"], + config: "Config", + report: Union["CollectReport", "TestReport"], ) -> Optional[Dict[str, Any]]: """Serialize the given report object into a data structure suitable for sending over the wire, e.g. converted to JSON.""" @@ -528,9 +553,11 @@ def pytest_report_to_serializable( @hookspec(firstresult=True) def pytest_report_from_serializable( - config: "Config", data: Dict[str, Any], + config: "Config", + data: Dict[str, Any], ) -> Optional[Union["CollectReport", "TestReport"]]: - """Restore a report object previously serialized with pytest_report_to_serializable().""" + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`.""" # ------------------------------------------------------------------------- @@ -577,7 +604,8 @@ def pytest_sessionstart(session: "Session") -> None: def pytest_sessionfinish( - session: "Session", exitstatus: Union[int, "ExitCode"], + session: "Session", + exitstatus: Union[int, "ExitCode"], ) -> None: """Called after whole test run finished, right before returning the exit status to the system. @@ -589,7 +617,7 @@ def pytest_sessionfinish( def pytest_unconfigure(config: "Config") -> None: """Called before test process is exited. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. """ @@ -608,12 +636,12 @@ def pytest_assertrepr_compare( *in* a string will be escaped. Note that all but the first line will be indented slightly, the intention is for the first line to be a summary. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. """ def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None: - """**(Experimental)** Called whenever an assertion passes. + """Called whenever an assertion passes. .. versionadded:: 5.0 @@ -637,13 +665,6 @@ def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> No :param int lineno: Line number of the assert statement. :param str orig: String with the original assertion. :param str expl: String with the assert explanation. - - .. note:: - - This hook is **experimental**, so its parameters or even the hook itself might - be changed/removed without warning in any future pytest release. - - If you find this hook useful, please share your feedback in an issue. """ @@ -653,12 +674,13 @@ def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> No def pytest_report_header( - config: "Config", startdir: py.path.local + config: "Config", start_path: Path, startdir: "LEGACY_PATH" ) -> Union[str, List[str]]: """Return a string or list of strings to be displayed as header info for terminal reporting. - :param _pytest.config.Config config: The pytest config object. - :param py.path.local startdir: The starting dir. + :param pytest.Config config: The pytest config object. + :param Path start_path: The starting dir. + :param LEGACY_PATH startdir: The starting dir (deprecated). .. note:: @@ -672,11 +694,19 @@ def pytest_report_header( This function should be implemented only in plugins or ``conftest.py`` files situated at the tests root directory due to how pytest :ref:`discovers plugins during startup <pluginorder>`. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. 
The ``startdir`` parameter + has been deprecated. """ def pytest_report_collectionfinish( - config: "Config", startdir: py.path.local, items: Sequence["Item"], + config: "Config", + start_path: Path, + startdir: "LEGACY_PATH", + items: Sequence["Item"], ) -> Union[str, List[str]]: """Return a string or list of strings to be displayed after collection has finished successfully. @@ -685,8 +715,9 @@ def pytest_report_collectionfinish( .. versionadded:: 3.2 - :param _pytest.config.Config config: The pytest config object. - :param py.path.local startdir: The starting dir. + :param pytest.Config config: The pytest config object. + :param Path start_path: The starting dir. + :param LEGACY_PATH startdir: The starting dir (deprecated). :param items: List of pytest items that are going to be executed; this list should not be modified. .. note:: @@ -695,15 +726,18 @@ def pytest_report_collectionfinish( ran before it. If you want to have your line(s) displayed first, use :ref:`trylast=True <plugin-hookorder>`. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. """ @hookspec(firstresult=True) def pytest_report_teststatus( report: Union["CollectReport", "TestReport"], config: "Config" -) -> Tuple[ - str, str, Union[str, Mapping[str, bool]], -]: +) -> Tuple[str, str, Union[str, Mapping[str, bool]]]: """Return result-category, shortletter and verbose word for status reporting. @@ -721,20 +755,22 @@ def pytest_report_teststatus( for example ``"rerun", "R", ("RERUN", {"yellow": True})``. :param report: The report object whose status is to be returned. - :param _pytest.config.Config config: The pytest config object. + :param config: The pytest config object. Stops at first non-None result, see :ref:`firstresult`. """ def pytest_terminal_summary( - terminalreporter: "TerminalReporter", exitstatus: "ExitCode", config: "Config", + terminalreporter: "TerminalReporter", + exitstatus: "ExitCode", + config: "Config", ) -> None: """Add a section to terminal summary reporting. :param _pytest.terminal.TerminalReporter terminalreporter: The internal terminal reporter object. :param int exitstatus: The exit status that will be reported back to the OS. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. .. versionadded:: 4.2 The ``config`` parameter. @@ -824,7 +860,7 @@ def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]: .. versionadded:: 6.2 - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :returns: A dictionary of additional globals to add. """ @@ -835,7 +871,8 @@ def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]: def pytest_internalerror( - excrepr: "ExceptionRepr", excinfo: "ExceptionInfo[BaseException]", + excrepr: "ExceptionRepr", + excinfo: "ExceptionInfo[BaseException]", ) -> Optional[bool]: """Called for internal errors. @@ -858,11 +895,11 @@ def pytest_exception_interact( """Called when an exception was raised which can potentially be interactively handled. - May be called during collection (see :py:func:`pytest_make_collect_report`), - in which case ``report`` is a :py:class:`_pytest.reports.CollectReport`. + May be called during collection (see :hook:`pytest_make_collect_report`), + in which case ``report`` is a :class:`CollectReport`. 
- May be called during runtest of an item (see :py:func:`pytest_runtest_protocol`), - in which case ``report`` is a :py:class:`_pytest.reports.TestReport`. + May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), + in which case ``report`` is a :class:`TestReport`. This hook is not called if the exception that was raised is an internal exception like ``skip.Exception``. @@ -875,7 +912,7 @@ def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None: Can be used by plugins to take special action just before the python debugger enters interactive mode. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :param pdb.Pdb pdb: The Pdb instance. """ @@ -886,6 +923,6 @@ def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None: Can be used by plugins to take special action just after the python debugger leaves interactive mode. - :param _pytest.config.Config config: The pytest config object. + :param pytest.Config config: The pytest config object. :param pdb.Pdb pdb: The Pdb instance. """ diff --git a/contrib/python/pytest/py3/_pytest/junitxml.py b/contrib/python/pytest/py3/_pytest/junitxml.py index c4761cd3b8..4af5fbab0c 100644 --- a/contrib/python/pytest/py3/_pytest/junitxml.py +++ b/contrib/python/pytest/py3/_pytest/junitxml.py @@ -30,11 +30,11 @@ from _pytest.config import filename_arg from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureRequest from _pytest.reports import TestReport -from _pytest.store import StoreKey +from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter -xml_key = StoreKey["LogXML"]() +xml_key = StashKey["LogXML"]() def bin_xml_escape(arg: object) -> str: @@ -256,7 +256,7 @@ class _NodeReporter: def finalize(self) -> None: data = self.to_xml() self.__dict__.clear() - # Type ignored becuase mypy doesn't like overriding a method. + # Type ignored because mypy doesn't like overriding a method. # Also the return value doesn't match... self.to_xml = lambda: data # type: ignore[assignment] @@ -267,7 +267,7 @@ def _warn_incompatibility_with_xunit2( """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" from _pytest.warning_types import PytestWarning - xml = request.config._store.get(xml_key, None) + xml = request.config.stash.get(xml_key, None) if xml is not None and xml.family not in ("xunit1", "legacy"): request.node.warn( PytestWarning( @@ -322,7 +322,7 @@ def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], Non attr_func = add_attr_noop - xml = request.config._store.get(xml_key, None) + xml = request.config.stash.get(xml_key, None) if xml is not None: node_reporter = xml.node_reporter(request.node.nodeid) attr_func = node_reporter.add_attribute @@ -359,8 +359,8 @@ def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object] .. warning:: Currently this fixture **does not work** with the - `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See issue - `#7767 <https://github.com/pytest-dev/pytest/issues/7767>`__ for details. + `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See + :issue:`7767` for details. 
""" __tracebackhide__ = True @@ -370,7 +370,7 @@ def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object] __tracebackhide__ = True _check_record_param_type("name", name) - xml = request.config._store.get(xml_key, None) + xml = request.config.stash.get(xml_key, None) if xml is not None: record_func = xml.add_global_property # noqa return record_func @@ -428,7 +428,7 @@ def pytest_configure(config: Config) -> None: # Prevent opening xmllog on worker nodes (xdist). if xmlpath and not hasattr(config, "workerinput"): junit_family = config.getini("junit_family") - config._store[xml_key] = LogXML( + config.stash[xml_key] = LogXML( xmlpath, config.option.junitprefix, config.getini("junit_suite_name"), @@ -437,23 +437,19 @@ def pytest_configure(config: Config) -> None: junit_family, config.getini("junit_log_passing_tests"), ) - config.pluginmanager.register(config._store[xml_key]) + config.pluginmanager.register(config.stash[xml_key]) def pytest_unconfigure(config: Config) -> None: - xml = config._store.get(xml_key, None) + xml = config.stash.get(xml_key, None) if xml: - del config._store[xml_key] + del config.stash[xml_key] config.pluginmanager.unregister(xml) def mangle_test_address(address: str) -> List[str]: path, possible_open_bracket, params = address.partition("[") names = path.split("::") - try: - names.remove("()") - except ValueError: - pass # Convert file path to dotted path. names[0] = names[0].replace(nodes.SEP, ".") names[0] = re.sub(r"\.py$", "", names[0]) @@ -486,7 +482,7 @@ class LogXML: ) self.node_reporters: Dict[ Tuple[Union[str, TestReport], object], _NodeReporter - ] = ({}) + ] = {} self.node_reporters_ordered: List[_NodeReporter] = [] self.global_properties: List[Tuple[str, str]] = [] @@ -648,39 +644,39 @@ class LogXML: dirname = os.path.dirname(os.path.abspath(self.logfile)) if not os.path.isdir(dirname): os.makedirs(dirname) - logfile = open(self.logfile, "w", encoding="utf-8") - suite_stop_time = timing.time() - suite_time_delta = suite_stop_time - self.suite_start_time - - numtests = ( - self.stats["passed"] - + self.stats["failure"] - + self.stats["skipped"] - + self.stats["error"] - - self.cnt_double_fail_tests - ) - logfile.write('<?xml version="1.0" encoding="utf-8"?>') - - suite_node = ET.Element( - "testsuite", - name=self.suite_name, - errors=str(self.stats["error"]), - failures=str(self.stats["failure"]), - skipped=str(self.stats["skipped"]), - tests=str(numtests), - time="%.3f" % suite_time_delta, - timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(), - hostname=platform.node(), - ) - global_properties = self._get_global_properties_node() - if global_properties is not None: - suite_node.append(global_properties) - for node_reporter in self.node_reporters_ordered: - suite_node.append(node_reporter.to_xml()) - testsuites = ET.Element("testsuites") - testsuites.append(suite_node) - logfile.write(ET.tostring(testsuites, encoding="unicode")) - logfile.close() + + with open(self.logfile, "w", encoding="utf-8") as logfile: + suite_stop_time = timing.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('<?xml version="1.0" encoding="utf-8"?>') + + suite_node = ET.Element( + "testsuite", + name=self.suite_name, + errors=str(self.stats["error"]), + failures=str(self.stats["failure"]), + skipped=str(self.stats["skipped"]), + tests=str(numtests), + 
time="%.3f" % suite_time_delta, + timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(), + hostname=platform.node(), + ) + global_properties = self._get_global_properties_node() + if global_properties is not None: + suite_node.append(global_properties) + for node_reporter in self.node_reporters_ordered: + suite_node.append(node_reporter.to_xml()) + testsuites = ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") diff --git a/contrib/python/pytest/py3/_pytest/legacypath.py b/contrib/python/pytest/py3/_pytest/legacypath.py new file mode 100644 index 0000000000..37e8c24220 --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/legacypath.py @@ -0,0 +1,467 @@ +"""Add backward compatibility support for the legacy py path type.""" +import shlex +import subprocess +from pathlib import Path +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +import attr +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import final +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + +if TYPE_CHECKING: + from typing_extensions import Final + + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
+ """ + + __test__ = False + + CLOSE_STDIN: "Final" = Pytester.CLOSE_STDIN + TimeoutExpired: "Final" = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + """See :meth:`Pytester._finalize`.""" + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: 
List[Union[Item, Collector]]) -> List[Item]: + """See :meth:`Pytester.genitems`.""" + return self._pytester.genitems(colitems) + + def runitem(self, source): + """See :meth:`Pytester.runitem`.""" + return self._pytester.runitem(source) + + def inline_runsource(self, source, *cmdlineargs): + """See :meth:`Pytester.inline_runsource`.""" + return self._pytester.inline_runsource(source, *cmdlineargs) + + def inline_genitems(self, *args): + """See :meth:`Pytester.inline_genitems`.""" + return self._pytester.inline_genitems(*args) + + def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): + """See :meth:`Pytester.inline_run`.""" + return self._pytester.inline_run( + *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc + ) + + def runpytest_inprocess(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest_inprocess`.""" + return self._pytester.runpytest_inprocess(*args, **kwargs) + + def runpytest(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest`.""" + return self._pytester.runpytest(*args, **kwargs) + + def parseconfig(self, *args) -> Config: + """See :meth:`Pytester.parseconfig`.""" + return self._pytester.parseconfig(*args) + + def parseconfigure(self, *args) -> Config: + """See :meth:`Pytester.parseconfigure`.""" + return self._pytester.parseconfigure(*args) + + def getitem(self, source, funcname="test_func"): + """See :meth:`Pytester.getitem`.""" + return self._pytester.getitem(source, funcname) + + def getitems(self, source): + """See :meth:`Pytester.getitems`.""" + return self._pytester.getitems(source) + + def getmodulecol(self, source, configargs=(), withinit=False): + """See :meth:`Pytester.getmodulecol`.""" + return self._pytester.getmodulecol( + source, configargs=configargs, withinit=withinit + ) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.collect_by_name`.""" + return self._pytester.collect_by_name(modcol, name) + + def popen( + self, + cmdargs, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """See :meth:`Pytester.popen`.""" + return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) + + def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: + """See :meth:`Pytester.run`.""" + return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) + + def runpython(self, script) -> RunResult: + """See :meth:`Pytester.runpython`.""" + return self._pytester.runpython(script) + + def runpython_c(self, command): + """See :meth:`Pytester.runpython_c`.""" + return self._pytester.runpython_c(command) + + def runpytest_subprocess(self, *args, timeout=None) -> RunResult: + """See :meth:`Pytester.runpytest_subprocess`.""" + return self._pytester.runpytest_subprocess(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """See :meth:`Pytester.spawn_pytest`.""" + return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """See :meth:`Pytester.spawn`.""" + return self._pytester.spawn(cmd, expect_timeout=expect_timeout) + + def __repr__(self) -> str: + return f"<Testdir {self.tmpdir!r}>" + + def __str__(self) -> str: + return str(self.tmpdir) + + +class LegacyTestdirPlugin: + @staticmethod + @fixture + def testdir(pytester: Pytester) -> Testdir: + """ + Identical to :fixture:`pytester`, and provides an instance whose methods return 
+ legacy ``LEGACY_PATH`` objects instead when applicable. + + New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. + """ + return Testdir(pytester, _ispytest=True) + + +@final +@attr.s(init=False, auto_attribs=True) +class TempdirFactory: + """Backward compatibility wrapper that implements :class:``_pytest.compat.LEGACY_PATH`` + for :class:``TempPathFactory``.""" + + _tmppath_factory: TempPathFactory + + def __init__( + self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._tmppath_factory = tmppath_factory + + def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``_pytest.compat.LEGACY_PATH`` object.""" + return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self) -> LEGACY_PATH: + """Backward compat wrapper for ``_tmppath_factory.getbasetemp``.""" + return legacy_path(self._tmppath_factory.getbasetemp().resolve()) + + +class LegacyTmpdirPlugin: + @staticmethod + @fixture(scope="session") + def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: + """Return a :class:`pytest.TempdirFactory` instance for the test session.""" + # Set dynamically by pytest_configure(). + return request.config._tmpdirhandler # type: ignore + + @staticmethod + @fixture + def tmpdir(tmp_path: Path) -> LEGACY_PATH: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. If + ``--basetemp`` is used then it is cleared each session. See :ref:`base + temporary directory`. + + The returned object is a `legacy_path`_ object. + + .. _legacy_path: https://py.readthedocs.io/en/latest/path.html + """ + return legacy_path(tmp_path) + + +def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH: + """Return a directory path object with the given name. + + Same as :func:`mkdir`, but returns a legacy py path instance. + """ + return legacy_path(self.mkdir(name)) + + +def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH: + """(deprecated) The file system path of the test module which collected this test.""" + return legacy_path(self.path) + + +def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config_invocation_dir(self: Config) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`, + which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.invocation_params.dir)) + + +def Config_rootdir(self: Config) -> LEGACY_PATH: + """The path to the :ref:`rootdir <rootdir>`. + + Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.rootpath)) + + +def Config_inifile(self: Config) -> Optional[LEGACY_PATH]: + """The path to the :ref:`configfile <configfiles>`. + + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. 
+ + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_stardir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type( + self, name: str, type: str, value: Union[str, List[str]] +): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_stardir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/contrib/python/pytest/py3/_pytest/logging.py b/contrib/python/pytest/py3/_pytest/logging.py index 2e4847328a..31ad830107 100644 --- a/contrib/python/pytest/py3/_pytest/logging.py +++ b/contrib/python/pytest/py3/_pytest/logging.py @@ -31,15 +31,15 @@ from _pytest.deprecated import check_ispytest from _pytest.fixtures import fixture from _pytest.fixtures import FixtureRequest from _pytest.main import Session -from _pytest.store import StoreKey +from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") -caplog_handler_key = StoreKey["LogCaptureHandler"]() -caplog_records_key = StoreKey[Dict[str, List[logging.LogRecord]]]() +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]() def _remove_ansi_escape_sequences(text: str) -> str: @@ -59,32 +59,47 @@ class ColoredLevelFormatter(logging.Formatter): logging.DEBUG: {"purple"}, logging.NOTSET: set(), } - LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)") + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter self._original_fmt = self._style._fmt self._level_to_fmt_mapping: Dict[int, str] = {} + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. 
+ """ + assert self._fmt is not None levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) if not levelname_fmt_match: return levelname_fmt = levelname_fmt_match.group() - for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): - formatted_levelname = levelname_fmt % { - "levelname": logging.getLevelName(level) - } - - # add ANSI escape sequences around the formatted levelname - color_kwargs = {name: True for name in color_opts} - colorized_formatted_levelname = terminalwriter.markup( - formatted_levelname, **color_kwargs - ) - self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( - colorized_formatted_levelname, self._fmt - ) + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) def format(self, record: logging.LogRecord) -> str: fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) @@ -104,14 +119,6 @@ class PercentStyleMultiline(logging.PercentStyle): self._auto_indent = self._get_auto_indent(auto_indent) @staticmethod - def _update_message( - record_dict: Dict[str, object], message: str - ) -> Dict[str, object]: - tmp = record_dict.copy() - tmp["message"] = message - return tmp - - @staticmethod def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int: """Determine the current auto indentation setting. @@ -176,7 +183,7 @@ class PercentStyleMultiline(logging.PercentStyle): if auto_indent: lines = record.message.splitlines() - formatted = self._fmt % self._update_message(record.__dict__, lines[0]) + formatted = self._fmt % {**record.__dict__, "message": lines[0]} if auto_indent < 0: indentation = _remove_ansi_escape_sequences(formatted).find( @@ -372,7 +379,7 @@ class LogCaptureFixture: :rtype: LogCaptureHandler """ - return self._item._store[caplog_handler_key] + return self._item.stash[caplog_handler_key] def get_records(self, when: str) -> List[logging.LogRecord]: """Get the logging records for one of the possible test phases. @@ -385,7 +392,7 @@ class LogCaptureFixture: .. versionadded:: 3.4 """ - return self._item._store[caplog_records_key].get(when, []) + return self._item.stash[caplog_records_key].get(when, []) @property def text(self) -> str: @@ -451,7 +458,7 @@ class LogCaptureFixture: @contextmanager def at_level( - self, level: int, logger: Optional[str] = None + self, level: Union[int, str], logger: Optional[str] = None ) -> Generator[None, None, None]: """Context manager that sets the level for capturing of logs. 
After the end of the 'with' statement the level is restored to its original @@ -626,7 +633,8 @@ class LoggingPlugin: finally: self.log_file_handler.release() if old_stream: - old_stream.close() + # https://github.com/python/typeshed/pull/5663 + old_stream.close() # type:ignore[attr-defined] def _log_cli_enabled(self): """Return whether live logging is enabled.""" @@ -685,14 +693,16 @@ class LoggingPlugin: def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]: """Implement the internals of the pytest_runtest_xxx() hooks.""" with catching_logs( - self.caplog_handler, level=self.log_level, + self.caplog_handler, + level=self.log_level, ) as caplog_handler, catching_logs( - self.report_handler, level=self.log_level, + self.report_handler, + level=self.log_level, ) as report_handler: caplog_handler.reset() report_handler.reset() - item._store[caplog_records_key][when] = caplog_handler.records - item._store[caplog_handler_key] = caplog_handler + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler yield @@ -704,7 +714,7 @@ class LoggingPlugin: self.log_cli_handler.set_when("setup") empty: Dict[str, List[logging.LogRecord]] = {} - item._store[caplog_records_key] = empty + item.stash[caplog_records_key] = empty yield from self._runtest_for(item, "setup") @hookimpl(hookwrapper=True) @@ -718,8 +728,8 @@ class LoggingPlugin: self.log_cli_handler.set_when("teardown") yield from self._runtest_for(item, "teardown") - del item._store[caplog_records_key] - del item._store[caplog_handler_key] + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] @hookimpl def pytest_runtest_logfinish(self) -> None: @@ -766,7 +776,7 @@ class _LiveLoggingStreamHandler(logging.StreamHandler): terminal_reporter: TerminalReporter, capture_manager: Optional[CaptureManager], ) -> None: - logging.StreamHandler.__init__(self, stream=terminal_reporter) # type: ignore[arg-type] + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] self.capture_manager = capture_manager self.reset() self.set_when(None) diff --git a/contrib/python/pytest/py3/_pytest/main.py b/contrib/python/pytest/py3/_pytest/main.py index 41a33d4494..fea8179ca7 100644 --- a/contrib/python/pytest/py3/_pytest/main.py +++ b/contrib/python/pytest/py3/_pytest/main.py @@ -21,7 +21,6 @@ from typing import TYPE_CHECKING from typing import Union import attr -import py import _pytest._code from _pytest import nodes @@ -37,6 +36,7 @@ from _pytest.fixtures import FixtureManager from _pytest.outcomes import exit from _pytest.pathlib import absolutepath from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import visit from _pytest.reports import CollectReport from _pytest.reports import TestReport @@ -115,7 +115,9 @@ def pytest_addoption(parser: Parser) -> None: help="markers not registered in the `markers` section of the configuration file raise errors.", ) group._addoption( - "--strict", action="store_true", help="(deprecated) alias to --strict-markers.", + "--strict", + action="store_true", + help="(deprecated) alias to --strict-markers.", ) group._addoption( "-c", @@ -237,10 +239,7 @@ def validate_basetemp(path: str) -> str: """Return whether query is an ancestor of base.""" if base == query: return True - for parent in base.parents: - if parent == query: - return True - return False + return query in base.parents # check if path is an ancestor of cwd if is_ancestor(Path.cwd(), Path(path).absolute()): @@ 
-290,7 +289,7 @@ def wrap_session( except exit.Exception as exc: if exc.returncode is not None: session.exitstatus = exc.returncode - sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc)) + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") else: if isinstance(excinfo.value, SystemExit): sys.stderr.write("mainloop: caught unexpected SystemExit!\n") @@ -298,7 +297,7 @@ def wrap_session( finally: # Explicitly break reference cycle. excinfo = None # type: ignore - session.startdir.chdir() + os.chdir(session.startpath) if initstate >= 2: try: config.hook.pytest_sessionfinish( @@ -307,7 +306,7 @@ def wrap_session( except exit.Exception as exc: if exc.returncode is not None: session.exitstatus = exc.returncode - sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc)) + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") config._ensure_unconfigure() return session.exitstatus @@ -353,11 +352,14 @@ def pytest_runtestloop(session: "Session") -> bool: return True -def _in_venv(path: py.path.local) -> bool: +def _in_venv(path: Path) -> bool: """Attempt to detect if ``path`` is the root of a Virtual Environment by checking for the existence of the appropriate activate script.""" - bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") - if not bindir.isdir(): + bindir = path.joinpath("Scripts" if sys.platform.startswith("win") else "bin") + try: + if not bindir.is_dir(): + return False + except OSError: return False activates = ( "activate", @@ -367,32 +369,34 @@ def _in_venv(path: py.path.local) -> bool: "Activate.bat", "Activate.ps1", ) - return any([fname.basename in activates for fname in bindir.listdir()]) + return any(fname.name in activates for fname in bindir.iterdir()) -def pytest_ignore_collect(path: py.path.local, config: Config) -> Optional[bool]: - ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) +def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]: + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent, rootpath=config.rootpath + ) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: - ignore_paths.extend([py.path.local(x) for x in excludeopt]) + ignore_paths.extend(absolutepath(x) for x in excludeopt) - if py.path.local(path) in ignore_paths: + if collection_path in ignore_paths: return True ignore_globs = config._getconftest_pathlist( - "collect_ignore_glob", path=path.dirpath() + "collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath ) ignore_globs = ignore_globs or [] excludeglobopt = config.getoption("ignore_glob") if excludeglobopt: - ignore_globs.extend([py.path.local(x) for x in excludeglobopt]) + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) - if any(fnmatch.fnmatch(str(path), str(glob)) for glob in ignore_globs): + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): return True allow_in_venv = config.getoption("collect_in_virtualenv") - if not allow_in_venv and _in_venv(path): + if not allow_in_venv and _in_venv(collection_path): return True return None @@ -436,9 +440,9 @@ class Failed(Exception): """Signals a stop as failed test run.""" -@attr.s +@attr.s(slots=True, auto_attribs=True) class _bestrelpath_cache(Dict[Path, str]): - path = attr.ib(type=Path) + path: Path def __missing__(self, path: Path) -> str: r = bestrelpath(self.path, path) @@ -458,15 +462,19 @@ class Session(nodes.FSCollector): def __init__(self, config: Config) -> None: 
super().__init__( - config.rootdir, parent=None, config=config, session=self, nodeid="" + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", ) self.testsfailed = 0 self.testscollected = 0 self.shouldstop: Union[bool, str] = False self.shouldfail: Union[bool, str] = False self.trace = config.trace.root.get("collection") - self.startdir = config.invocation_dir - self._initialpaths: FrozenSet[py.path.local] = frozenset() + self._initialpaths: FrozenSet[Path] = frozenset() self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath) @@ -474,7 +482,7 @@ class Session(nodes.FSCollector): @classmethod def from_config(cls, config: Config) -> "Session": - session: Session = cls._create(config) + session: Session = cls._create(config=config) return session def __repr__(self) -> str: @@ -486,6 +494,14 @@ class Session(nodes.FSCollector): self.testscollected, ) + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + def _node_location_to_relpath(self, node_path: Path) -> str: # bestrelpath is a quite slow function. return self._bestrelpathcache[node_path] @@ -509,20 +525,28 @@ class Session(nodes.FSCollector): pytest_collectreport = pytest_runtest_logreport - def isinitpath(self, path: py.path.local) -> bool: - return path in self._initialpaths + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + return path_ in self._initialpaths - def gethookproxy(self, fspath: py.path.local): + def gethookproxy(self, fspath: "os.PathLike[str]"): + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager # Check if we have the common case of running # hooks with all conftest.py files. - pm = self.config.pluginmanager my_conftestmodules = pm._getconftestmodules( - fspath, self.config.getoption("importmode") + path, + self.config.getoption("importmode"), + rootpath=self.config.rootpath, ) remove_mods = pm._conftest_plugins.difference(my_conftestmodules) if remove_mods: # One or more conftests are not in use at this fspath. - proxy = FSHookProxy(pm, remove_mods) + from .config.compat import PathAwareHookProxy + + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) else: # All plugins are active for this fspath. 
proxy = self.config.hook @@ -531,38 +555,38 @@ class Session(nodes.FSCollector): def _recurse(self, direntry: "os.DirEntry[str]") -> bool: if direntry.name == "__pycache__": return False - path = py.path.local(direntry.path) - ihook = self.gethookproxy(path.dirpath()) - if ihook.pytest_ignore_collect(path=path, config=self.config): + fspath = Path(direntry.path) + ihook = self.gethookproxy(fspath.parent) + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): return False norecursepatterns = self.config.getini("norecursedirs") - if any(path.check(fnmatch=pat) for pat in norecursepatterns): + if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns): return False return True def _collectfile( - self, path: py.path.local, handle_dupes: bool = True + self, fspath: Path, handle_dupes: bool = True ) -> Sequence[nodes.Collector]: assert ( - path.isfile() + fspath.is_file() ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( - path, path.isdir(), path.exists(), path.islink() + fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() ) - ihook = self.gethookproxy(path) - if not self.isinitpath(path): - if ihook.pytest_ignore_collect(path=path, config=self.config): + ihook = self.gethookproxy(fspath) + if not self.isinitpath(fspath): + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): return () if handle_dupes: keepduplicates = self.config.getoption("keepduplicates") if not keepduplicates: duplicate_paths = self.config.pluginmanager._duplicatepaths - if path in duplicate_paths: + if fspath in duplicate_paths: return () else: - duplicate_paths.add(path) + duplicate_paths.add(fspath) - return ihook.pytest_collect_file(path=path, parent=self) # type: ignore[no-any-return] + return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] @overload def perform_collect( @@ -581,8 +605,7 @@ class Session(nodes.FSCollector): ) -> Sequence[Union[nodes.Item, nodes.Collector]]: """Perform the collection phase for this session. - This is called by the default - :func:`pytest_collection <_pytest.hookspec.pytest_collection>` hook + This is called by the default :hook:`pytest_collection` hook implementation; see the documentation of this hook for more details. For testing purposes, it may also be called directly on a fresh ``Session``. @@ -600,14 +623,14 @@ class Session(nodes.FSCollector): self.trace.root.indent += 1 self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] - self._initial_parts: List[Tuple[py.path.local, List[str]]] = [] + self._initial_parts: List[Tuple[Path, List[str]]] = [] self.items: List[nodes.Item] = [] hook = self.config.hook items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items try: - initialpaths: List[py.path.local] = [] + initialpaths: List[Path] = [] for arg in args: fspath, parts = resolve_collection_argument( self.config.invocation_params.dir, @@ -647,14 +670,12 @@ class Session(nodes.FSCollector): from _pytest.python import Package # Keep track of any collected nodes in here, so we don't duplicate fixtures. - node_cache1: Dict[py.path.local, Sequence[nodes.Collector]] = {} - node_cache2: Dict[ - Tuple[Type[nodes.Collector], py.path.local], nodes.Collector - ] = ({}) + node_cache1: Dict[Path, Sequence[nodes.Collector]] = {} + node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {} # Keep track of any collected collectors in matchnodes paths, so they # are not collected more than once. 
- matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = ({}) + matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {} # Dirnames of pkgs with dunder-init files. pkg_roots: Dict[str, Package] = {} @@ -668,36 +689,37 @@ class Session(nodes.FSCollector): # No point in finding packages when collecting doctests. if not self.config.getoption("doctestmodules", False): pm = self.config.pluginmanager - for parent in reversed(argpath.parts()): - if pm._confcutdir and pm._confcutdir.relto(parent): + confcutdir = pm._confcutdir + for parent in (argpath, *argpath.parents): + if confcutdir and parent in confcutdir.parents: break - if parent.isdir(): - pkginit = parent.join("__init__.py") - if pkginit.isfile() and pkginit not in node_cache1: + if parent.is_dir(): + pkginit = parent / "__init__.py" + if pkginit.is_file() and pkginit not in node_cache1: col = self._collectfile(pkginit, handle_dupes=False) if col: if isinstance(col[0], Package): pkg_roots[str(parent)] = col[0] - node_cache1[col[0].fspath] = [col[0]] + node_cache1[col[0].path] = [col[0]] # If it's a directory argument, recurse and look for any Subpackages. # Let the Package collector deal with subnodes, don't collect here. - if argpath.check(dir=1): - assert not names, "invalid arg {!r}".format((argpath, names)) + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" - seen_dirs: Set[py.path.local] = set() + seen_dirs: Set[Path] = set() for direntry in visit(str(argpath), self._recurse): if not direntry.is_file(): continue - path = py.path.local(direntry.path) - dirpath = path.dirpath() + path = Path(direntry.path) + dirpath = path.parent if dirpath not in seen_dirs: # Collect packages first. seen_dirs.add(dirpath) - pkginit = dirpath.join("__init__.py") + pkginit = dirpath / "__init__.py" if pkginit.exists(): for x in self._collectfile(pkginit): yield x @@ -708,19 +730,19 @@ class Session(nodes.FSCollector): continue for x in self._collectfile(path): - key = (type(x), x.fspath) - if key in node_cache2: - yield node_cache2[key] + key2 = (type(x), x.path) + if key2 in node_cache2: + yield node_cache2[key2] else: - node_cache2[key] = x + node_cache2[key2] = x yield x else: - assert argpath.check(file=1) + assert argpath.is_file() if argpath in node_cache1: col = node_cache1[argpath] else: - collect_root = pkg_roots.get(argpath.dirname, self) + collect_root = pkg_roots.get(str(argpath.parent), self) col = collect_root._collectfile(argpath, handle_dupes=False) if col: node_cache1[argpath] = col @@ -758,9 +780,6 @@ class Session(nodes.FSCollector): submatchnodes.append(r) if submatchnodes: work.append((submatchnodes, matchnames[1:])) - # XXX Accept IDs that don't have "()" for class instances. - elif len(rep.result) == 1 and rep.result[0].name == "()": - work.append((rep.result, matchnames)) else: # Report collection failures here to avoid failing to run some test # specified in the command line because the module could not be @@ -780,9 +799,7 @@ class Session(nodes.FSCollector): # first yielded item will be the __init__ Module itself, so # just use that. If this special case isn't taken, then all the # files in the package will be yielded. 
- if argpath.basename == "__init__.py" and isinstance( - matching[0], Package - ): + if argpath.name == "__init__.py" and isinstance(matching[0], Package): try: yield next(iter(matching[0].collect())) except StopIteration: @@ -831,7 +848,7 @@ def search_pypath(module_name: str) -> str: def resolve_collection_argument( invocation_path: Path, arg: str, *, as_pypath: bool = False -) -> Tuple[py.path.local, List[str]]: +) -> Tuple[Path, List[str]]: """Parse path arguments optionally containing selection parts and return (fspath, names). Command-line arguments can point to files and/or directories, and optionally contain @@ -841,7 +858,7 @@ def resolve_collection_argument( This function ensures the path exists, and returns a tuple: - (py.path.path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"]) + (Path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"]) When as_pypath is True, expects that the command-line argument actually contains module paths instead of file-system paths: @@ -854,7 +871,10 @@ def resolve_collection_argument( If the path doesn't exist, raise UsageError. If the path is a directory and selection parts are present, raise UsageError. """ - strpath, *parts = str(arg).split("::") + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" if as_pypath: strpath = search_pypath(strpath) fspath = invocation_path / strpath @@ -873,4 +893,4 @@ def resolve_collection_argument( else "directory argument cannot contain :: selection parts: {arg}" ) raise UsageError(msg.format(arg=arg)) - return py.path.local(str(fspath)), parts + return fspath, parts diff --git a/contrib/python/pytest/py3/_pytest/mark/__init__.py b/contrib/python/pytest/py3/_pytest/mark/__init__.py index 329a11c4ae..7e082f2e6e 100644 --- a/contrib/python/pytest/py3/_pytest/mark/__init__.py +++ b/contrib/python/pytest/py3/_pytest/mark/__init__.py @@ -25,7 +25,7 @@ from _pytest.config import UsageError from _pytest.config.argparsing import Parser from _pytest.deprecated import MINUS_K_COLON from _pytest.deprecated import MINUS_K_DASH -from _pytest.store import StoreKey +from _pytest.stash import StashKey if TYPE_CHECKING: from _pytest.nodes import Item @@ -41,7 +41,7 @@ __all__ = [ ] -old_mark_config_key = StoreKey[Optional[Config]]() +old_mark_config_key = StashKey[Optional[Config]]() def param( @@ -56,7 +56,10 @@ def param( @pytest.mark.parametrize( "test_input,expected", - [("3+5", 8), pytest.param("6*9", 42, marks=pytest.mark.xfail),], + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], ) def test_eval(test_input, expected): assert eval(test_input) == expected @@ -130,7 +133,7 @@ def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: return None -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class KeywordMatcher: """A matcher for keywords. @@ -145,7 +148,7 @@ class KeywordMatcher: any item, as well as names directly assigned to test functions. """ - _names = attr.ib(type=AbstractSet[str]) + _names: AbstractSet[str] @classmethod def from_item(cls, item: "Item") -> "KeywordMatcher": @@ -155,7 +158,7 @@ class KeywordMatcher: import pytest for node in item.listchain(): - if not isinstance(node, (pytest.Instance, pytest.Session)): + if not isinstance(node, pytest.Session): mapped_names.add(node.name) # Add the names added as extra keywords to current or parent items. 
@@ -187,27 +190,22 @@ def deselect_by_keyword(items: "List[Item]", config: Config) -> None: return if keywordexpr.startswith("-"): - # To be removed in pytest 7.0.0. + # To be removed in pytest 8.0.0. warnings.warn(MINUS_K_DASH, stacklevel=2) keywordexpr = "not " + keywordexpr[1:] selectuntil = False if keywordexpr[-1:] == ":": - # To be removed in pytest 7.0.0. + # To be removed in pytest 8.0.0. warnings.warn(MINUS_K_COLON, stacklevel=2) selectuntil = True keywordexpr = keywordexpr[:-1] - try: - expression = Expression.compile(keywordexpr) - except ParseError as e: - raise UsageError( - f"Wrong expression passed to '-k': {keywordexpr}: {e}" - ) from None + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") remaining = [] deselected = [] for colitem in items: - if keywordexpr and not expression.evaluate(KeywordMatcher.from_item(colitem)): + if keywordexpr and not expr.evaluate(KeywordMatcher.from_item(colitem)): deselected.append(colitem) else: if selectuntil: @@ -219,17 +217,17 @@ def deselect_by_keyword(items: "List[Item]", config: Config) -> None: items[:] = remaining -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class MarkMatcher: """A matcher for markers which are present. Tries to match on any marker names, attached to the given colitem. """ - own_mark_names = attr.ib() + own_mark_names: AbstractSet[str] @classmethod - def from_item(cls, item) -> "MarkMatcher": + def from_item(cls, item: "Item") -> "MarkMatcher": mark_names = {mark.name for mark in item.iter_markers()} return cls(mark_names) @@ -242,31 +240,33 @@ def deselect_by_mark(items: "List[Item]", config: Config) -> None: if not matchexpr: return - try: - expression = Expression.compile(matchexpr) - except ParseError as e: - raise UsageError(f"Wrong expression passed to '-m': {matchexpr}: {e}") from None - - remaining = [] - deselected = [] + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: List[Item] = [] + deselected: List[Item] = [] for item in items: - if expression.evaluate(MarkMatcher.from_item(item)): + if expr.evaluate(MarkMatcher.from_item(item)): remaining.append(item) else: deselected.append(item) - if deselected: config.hook.pytest_deselected(items=deselected) items[:] = remaining +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except ParseError as e: + raise UsageError(f"{exc_message}: {expr}: {e}") from None + + def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None: deselect_by_keyword(items, config) deselect_by_mark(items, config) def pytest_configure(config: Config) -> None: - config._store[old_mark_config_key] = MARK_GEN._config + config.stash[old_mark_config_key] = MARK_GEN._config MARK_GEN._config = config empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) @@ -279,4 +279,4 @@ def pytest_configure(config: Config) -> None: def pytest_unconfigure(config: Config) -> None: - MARK_GEN._config = config._store.get(old_mark_config_key, None) + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/contrib/python/pytest/py3/_pytest/mark/expression.py b/contrib/python/pytest/py3/_pytest/mark/expression.py index dc3991b10c..92220d7723 100644 --- a/contrib/python/pytest/py3/_pytest/mark/expression.py +++ b/contrib/python/pytest/py3/_pytest/mark/expression.py @@ -6,7 +6,7 @@ expression: expr? 
EOF expr: and_expr ('or' and_expr)* and_expr: not_expr ('and' not_expr)* not_expr: 'not' not_expr | '(' expr ')' | ident -ident: (\w|:|\+|-|\.|\[|\])+ +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ The semantics are: @@ -47,11 +47,11 @@ class TokenType(enum.Enum): EOF = "end of input" -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class Token: - type = attr.ib(type=TokenType) - value = attr.ib(type=str) - pos = attr.ib(type=int) + type: TokenType + value: str + pos: int class ParseError(Exception): @@ -88,7 +88,7 @@ class Scanner: yield Token(TokenType.RPAREN, ")", pos) pos += 1 else: - match = re.match(r"(:?\w|:|\+|-|\.|\[|\])+", input[pos:]) + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) if match: value = match.group(0) if value == "or": @@ -102,7 +102,8 @@ class Scanner: pos += len(value) else: raise ParseError( - pos + 1, 'unexpected character "{}"'.format(input[pos]), + pos + 1, + f'unexpected character "{input[pos]}"', ) yield Token(TokenType.EOF, "", pos) @@ -120,7 +121,8 @@ class Scanner: raise ParseError( self.current.pos + 1, "expected {}; got {}".format( - " OR ".join(type.value for type in expected), self.current.type.value, + " OR ".join(type.value for type in expected), + self.current.type.value, ), ) @@ -188,7 +190,7 @@ class MatcherAdapter(Mapping[str, bool]): class Expression: """A compiled match expression as used by -k and -m. - The expression can be evaulated against different matchers. + The expression can be evaluated against different matchers. """ __slots__ = ("code",) @@ -204,7 +206,9 @@ class Expression: """ astexpr = expression(Scanner(input)) code: types.CodeType = compile( - astexpr, filename="<pytest match expression>", mode="eval", + astexpr, + filename="<pytest match expression>", + mode="eval", ) return Expression(code) diff --git a/contrib/python/pytest/py3/_pytest/mark/structures.py b/contrib/python/pytest/py3/_pytest/mark/structures.py index f5736a4c1c..92a9ea7512 100644 --- a/contrib/python/pytest/py3/_pytest/mark/structures.py +++ b/contrib/python/pytest/py3/_pytest/mark/structures.py @@ -28,6 +28,7 @@ from ..compat import final from ..compat import NOTSET from ..compat import NotSetType from _pytest.config import Config +from _pytest.deprecated import check_ispytest from _pytest.outcomes import fail from _pytest.warning_types import PytestUnknownMarkWarning @@ -39,10 +40,7 @@ EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" def istestfunc(func) -> bool: - return ( - hasattr(func, "__call__") - and getattr(func, "__name__", "<lambda>") != "<lambda>" - ) + return callable(func) and getattr(func, "__name__", "<lambda>") != "<lambda>" def get_empty_parameterset_mark( @@ -98,9 +96,7 @@ class ParameterSet( if id is not None: if not isinstance(id, str): - raise TypeError( - "Expected id to be a string, got {}: {!r}".format(type(id), id) - ) + raise TypeError(f"Expected id to be a string, got {type(id)}: {id!r}") id = ascii_escaped(id) return cls(values, marks, id) @@ -200,21 +196,38 @@ class ParameterSet( @final -@attr.s(frozen=True) +@attr.s(frozen=True, init=False, auto_attribs=True) class Mark: #: Name of the mark. - name = attr.ib(type=str) + name: str #: Positional arguments of the mark decorator. - args = attr.ib(type=Tuple[Any, ...]) + args: Tuple[Any, ...] #: Keyword arguments of the mark decorator. - kwargs = attr.ib(type=Mapping[str, Any]) + kwargs: Mapping[str, Any] #: Source Mark for ids with parametrize Marks. 
- _param_ids_from = attr.ib(type=Optional["Mark"], default=None, repr=False) + _param_ids_from: Optional["Mark"] = attr.ib(default=None, repr=False) #: Resolved/generated ids with parametrize Marks. - _param_ids_generated = attr.ib( - type=Optional[Sequence[str]], default=None, repr=False - ) + _param_ids_generated: Optional[Sequence[str]] = attr.ib(default=None, repr=False) + + def __init__( + self, + name: str, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Optional["Mark"] = None, + param_ids_generated: Optional[Sequence[str]] = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) def _has_param_ids(self) -> bool: return "ids" in self.kwargs or len(self.args) >= 4 @@ -243,20 +256,21 @@ class Mark: self.args + other.args, dict(self.kwargs, **other.kwargs), param_ids_from=param_ids_from, + _ispytest=True, ) # A generic parameter designating an object to which a Mark may # be applied -- a test function (callable) or class. # Note: a lambda is not allowed, but this can't be represented. -_Markable = TypeVar("_Markable", bound=Union[Callable[..., object], type]) +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) -@attr.s +@attr.s(init=False, auto_attribs=True) class MarkDecorator: """A decorator for applying a mark on test functions and classes. - MarkDecorators are created with ``pytest.mark``:: + ``MarkDecorators`` are created with ``pytest.mark``:: mark1 = pytest.mark.NAME # Simple MarkDecorator mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator @@ -267,7 +281,7 @@ class MarkDecorator: def test_function(): pass - When a MarkDecorator is called it does the following: + When a ``MarkDecorator`` is called, it does the following: 1. If called with a single class as its only positional argument and no additional keyword arguments, it attaches the mark to the class so it @@ -276,19 +290,24 @@ class MarkDecorator: 2. If called with a single function as its only positional argument and no additional keyword arguments, it attaches the mark to the function, containing all the arguments already stored internally in the - MarkDecorator. + ``MarkDecorator``. - 3. When called in any other case, it returns a new MarkDecorator instance - with the original MarkDecorator's content updated with the arguments - passed to this call. + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. - Note: The rules above prevent MarkDecorators from storing only a single - function or class reference as their positional argument with no + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no additional keyword or positional arguments. You can work around this by using `with_args()`. 
""" - mark = attr.ib(type=Mark, validator=attr.validators.instance_of(Mark)) + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark @property def name(self) -> str: @@ -307,27 +326,23 @@ class MarkDecorator: @property def markname(self) -> str: + """:meta private:""" return self.name # for backward-compat (2.4.1 had this attr) - def __repr__(self) -> str: - return f"<MarkDecorator {self.mark!r}>" - def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator": """Return a MarkDecorator with extra arguments added. Unlike calling the MarkDecorator, with_args() can be used even if the sole argument is a callable/class. - - :rtype: MarkDecorator """ - mark = Mark(self.name, args, kwargs) - return self.__class__(self.mark.combined_with(mark)) + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) # Type ignored because the overloads overlap with an incompatible # return type. Not much we can do about that. Thankfully mypy picks # the first match so it works out even if we break the rules. @overload - def __call__(self, arg: _Markable) -> _Markable: # type: ignore[misc] + def __call__(self, arg: Markable) -> Markable: # type: ignore[misc] pass @overload @@ -345,7 +360,7 @@ class MarkDecorator: return self.with_args(*args, **kwargs) -def get_unpacked_marks(obj) -> List[Mark]: +def get_unpacked_marks(obj: object) -> Iterable[Mark]: """Obtain the unpacked marks that are stored on an object.""" mark_list = getattr(obj, "pytestmark", []) if not isinstance(mark_list, list): @@ -353,19 +368,21 @@ def get_unpacked_marks(obj) -> List[Mark]: return normalize_mark_list(mark_list) -def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List[Mark]: - """Normalize marker decorating helpers to mark objects. +def normalize_mark_list( + mark_list: Iterable[Union[Mark, MarkDecorator]] +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. - :type List[Union[Mark, Markdecorator]] mark_list: - :rtype: List[Mark] + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects """ - extracted = [ - getattr(mark, "mark", mark) for mark in mark_list - ] # unpack MarkDecorator - for mark in extracted: - if not isinstance(mark, Mark): - raise TypeError(f"got {mark!r} instead of Mark") - return [x for x in extracted if isinstance(x, Mark)] + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {repr(mark_obj)} instead of Mark") + yield mark_obj def store_mark(obj, mark: Mark) -> None: @@ -376,17 +393,17 @@ def store_mark(obj, mark: Mark) -> None: assert isinstance(mark, Mark), mark # Always reassign name to avoid updating pytestmark in a reference that # was only borrowed. - obj.pytestmark = get_unpacked_marks(obj) + [mark] + obj.pytestmark = [*get_unpacked_marks(obj), mark] # Typing for builtin pytest marks. This is cheating; it gives builtin marks # special privilege, and breaks modularity. But practicality beats purity... if TYPE_CHECKING: - from _pytest.fixtures import _Scope + from _pytest.scope import _ScopeName class _SkipMarkDecorator(MarkDecorator): @overload # type: ignore[override,misc] - def __call__(self, arg: _Markable) -> _Markable: + def __call__(self, arg: Markable) -> Markable: ... 
@overload @@ -404,7 +421,7 @@ if TYPE_CHECKING: class _XfailMarkDecorator(MarkDecorator): @overload # type: ignore[override,misc] - def __call__(self, arg: _Markable) -> _Markable: + def __call__(self, arg: Markable) -> Markable: ... @overload @@ -432,20 +449,16 @@ if TYPE_CHECKING: Callable[[Any], Optional[object]], ] ] = ..., - scope: Optional[_Scope] = ..., + scope: Optional[_ScopeName] = ..., ) -> MarkDecorator: ... class _UsefixturesMarkDecorator(MarkDecorator): - def __call__( # type: ignore[override] - self, *fixtures: str - ) -> MarkDecorator: + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] ... class _FilterwarningsMarkDecorator(MarkDecorator): - def __call__( # type: ignore[override] - self, *filters: str - ) -> MarkDecorator: + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] ... @@ -465,9 +478,6 @@ class MarkGenerator: applies a 'slowtest' :class:`Mark` on ``test_function``. """ - _config: Optional[Config] = None - _markers: Set[str] = set() - # See TYPE_CHECKING above. if TYPE_CHECKING: skip: _SkipMarkDecorator @@ -477,7 +487,13 @@ class MarkGenerator: usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Optional[Config] = None + self._markers: Set[str] = set() + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") @@ -513,19 +529,21 @@ class MarkGenerator: warnings.warn( "Unknown pytest.mark.%s - is this a typo? You can register " "custom marks to avoid this warning - for details, see " - "https://docs.pytest.org/en/stable/mark.html" % name, + "https://docs.pytest.org/en/stable/how-to/mark.html" % name, PytestUnknownMarkWarning, 2, ) - return MarkDecorator(Mark(name, (), {})) + return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True) -MARK_GEN = MarkGenerator() +MARK_GEN = MarkGenerator(_ispytest=True) @final class NodeKeywords(MutableMapping[str, Any]): + __slots__ = ("node", "parent", "_markers") + def __init__(self, node: "Node") -> None: self.node = node self.parent = node.parent @@ -542,21 +560,39 @@ class NodeKeywords(MutableMapping[str, Any]): def __setitem__(self, key: str, value: Any) -> None: self._markers[key] = value + # Note: we could've avoided explicitly implementing some of the methods + # below and use the collections.abc fallback, but that would be slow. + + def __contains__(self, key: object) -> bool: + return ( + key in self._markers + or self.parent is not None + and key in self.parent.keywords + ) + + def update( # type: ignore[override] + self, + other: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]] = (), + **kwds: Any, + ) -> None: + self._markers.update(other) + self._markers.update(kwds) + def __delitem__(self, key: str) -> None: raise ValueError("cannot delete key in keywords dict") def __iter__(self) -> Iterator[str]: - seen = self._seen() - return iter(seen) - - def _seen(self) -> Set[str]: - seen = set(self._markers) + # Doesn't need to be fast. + yield from self._markers if self.parent is not None: - seen.update(self.parent.keywords) - return seen + for keyword in self.parent.keywords: + # self._marks and self.parent.keywords can have duplicates. + if keyword not in self._markers: + yield keyword def __len__(self) -> int: - return len(self._seen()) + # Doesn't need to be fast. 
+ return sum(1 for keyword in self) def __repr__(self) -> str: return f"<NodeKeywords for node {self.node}>" diff --git a/contrib/python/pytest/py3/_pytest/monkeypatch.py b/contrib/python/pytest/py3/_pytest/monkeypatch.py index a052f693ac..31f95a95ab 100644 --- a/contrib/python/pytest/py3/_pytest/monkeypatch.py +++ b/contrib/python/pytest/py3/_pytest/monkeypatch.py @@ -4,7 +4,6 @@ import re import sys import warnings from contextlib import contextmanager -from pathlib import Path from typing import Any from typing import Generator from typing import List @@ -37,7 +36,7 @@ def monkeypatch() -> Generator["MonkeyPatch", None, None]: monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) + monkeypatch.setenv(name, value, prepend=None) monkeypatch.delenv(name, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) @@ -92,7 +91,7 @@ def annotated_getattr(obj: object, name: str, ann: str) -> object: def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]: - if not isinstance(import_path, str) or "." not in import_path: # type: ignore[unreachable] + if not isinstance(import_path, str) or "." not in import_path: raise TypeError(f"must be absolute import path string, not {import_path!r}") module, attr = import_path.rsplit(".", 1) target = resolve(module) @@ -125,7 +124,7 @@ class MonkeyPatch: def __init__(self) -> None: self._setattr: List[Tuple[object, str, object]] = [] - self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = ([]) + self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = [] self._cwd: Optional[str] = None self._savesyspath: Optional[List[str]] = None @@ -148,7 +147,7 @@ class MonkeyPatch: Useful in situations where it is desired to undo some patches before the test ends, such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples - of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_. + of this see :issue:`3290`). """ m = cls() try: @@ -158,13 +157,21 @@ class MonkeyPatch: @overload def setattr( - self, target: str, name: object, value: Notset = ..., raising: bool = ..., + self, + target: str, + name: object, + value: Notset = ..., + raising: bool = ..., ) -> None: ... @overload def setattr( - self, target: object, name: str, value: object, raising: bool = ..., + self, + target: object, + name: str, + value: object, + raising: bool = ..., ) -> None: ... @@ -305,14 +312,17 @@ class MonkeyPatch: def syspath_prepend(self, path) -> None: """Prepend ``path`` to ``sys.path`` list of import locations.""" - from pkg_resources import fixup_namespace_packages if self._savesyspath is None: self._savesyspath = sys.path[:] sys.path.insert(0, str(path)) # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 - fixup_namespace_packages(str(path)) + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + from pkg_resources import fixup_namespace_packages + + fixup_namespace_packages(str(path)) # A call to syspathinsert() usually means that the caller wants to # import some dynamically created files, thus with python3 we @@ -325,20 +335,14 @@ class MonkeyPatch: invalidate_caches() - def chdir(self, path) -> None: + def chdir(self, path: Union[str, "os.PathLike[str]"]) -> None: """Change the current working directory to the specified path. 
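chdir() above now defers entirely to os.chdir(), which already accepts both strings and path objects. A hedged usage sketch via MonkeyPatch.context() (the directory and variable names are made up):

    import os
    from pathlib import Path
    from _pytest.monkeypatch import MonkeyPatch

    with MonkeyPatch.context() as mp:
        mp.chdir(Path("/tmp"))         # str or os.PathLike both work now
        mp.setenv("EXAMPLE_FLAG", "1")
        ...  # code relying on the patched cwd / environment
    # On exit the previous working directory and environment are restored.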
- Path can be a string or a py.path.local object. + Path can be a string or a path object. """ if self._cwd is None: self._cwd = os.getcwd() - if hasattr(path, "chdir"): - path.chdir() - elif isinstance(path, Path): - # Modern python uses the fspath protocol here LEGACY - os.chdir(str(path)) - else: - os.chdir(path) + os.chdir(path) def undo(self) -> None: """Undo previous changes. diff --git a/contrib/python/pytest/py3/_pytest/nodes.py b/contrib/python/pytest/py3/_pytest/nodes.py index 27434fb6a6..e49c1b003e 100644 --- a/contrib/python/pytest/py3/_pytest/nodes.py +++ b/contrib/python/pytest/py3/_pytest/nodes.py @@ -1,10 +1,14 @@ import os import warnings +from inspect import signature from pathlib import Path +from typing import Any from typing import Callable +from typing import cast from typing import Iterable from typing import Iterator from typing import List +from typing import MutableMapping from typing import Optional from typing import overload from typing import Set @@ -14,22 +18,24 @@ from typing import TYPE_CHECKING from typing import TypeVar from typing import Union -import py - import _pytest._code from _pytest._code import getfslineno from _pytest._code.code import ExceptionInfo from _pytest._code.code import TerminalRepr from _pytest.compat import cached_property +from _pytest.compat import LEGACY_PATH from _pytest.config import Config from _pytest.config import ConftestImportFailure from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG from _pytest.mark.structures import Mark from _pytest.mark.structures import MarkDecorator from _pytest.mark.structures import NodeKeywords from _pytest.outcomes import fail from _pytest.pathlib import absolutepath -from _pytest.store import Store +from _pytest.pathlib import commonpath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning if TYPE_CHECKING: # Imported here due to circular import. @@ -39,7 +45,7 @@ if TYPE_CHECKING: SEP = "/" -tracebackcutdir = py.path.local(_pytest.__file__).dirpath() +tracebackcutdir = Path(_pytest.__file__).parent def iterparentnodeids(nodeid: str) -> Iterator[str]: @@ -58,23 +64,62 @@ def iterparentnodeids(nodeid: str) -> Iterator[str]: "testing/code/test_excinfo.py::TestFormattedExcinfo" "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" - Note that :: parts are only considered at the last / component. + Note that / components are only considered until the first ::. """ pos = 0 - sep = SEP + first_colons: Optional[int] = nodeid.find("::") + if first_colons == -1: + first_colons = None + # The root Session node - always present. yield "" + # Eagerly consume SEP parts until first colons. while True: - at = nodeid.find(sep, pos) - if at == -1 and sep == SEP: - sep = "::" - elif at == -1: - if nodeid: - yield nodeid + at = nodeid.find(SEP, pos, first_colons) + if at == -1: break - else: - if at: - yield nodeid[:at] - pos = at + len(sep) + if at > 0: + yield nodeid[:at] + pos = at + len(SEP) + # Eagerly consume :: parts. + while True: + at = nodeid.find("::", pos) + if at == -1: + break + if at > 0: + yield nodeid[:at] + pos = at + len("::") + # The node ID itself. 
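The rewritten splitting logic above only honours "/" separators before the first "::". For the docstring's own example the generator therefore yields, in order (a worked illustration, not part of the patch):

    from _pytest.nodes import iterparentnodeids

    nodeid = "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
    assert list(iterparentnodeids(nodeid)) == [
        "",
        "testing",
        "testing/code",
        "testing/code/test_excinfo.py",
        "testing/code/test_excinfo.py::TestFormattedExcinfo",
        "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source",
    ]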
+ if nodeid: + yield nodeid + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +def _imply_path( + node_type: Type["Node"], + path: Optional[Path], + fspath: Optional[LEGACY_PATH], +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=3, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) _NodeType = TypeVar("_NodeType", bound="Node") @@ -87,11 +132,27 @@ class NodeMeta(type): "See " "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" " for more details." - ).format(name=self.__name__) + ).format(name=f"{self.__module__}.{self.__name__}") fail(msg, pytrace=False) def _create(self, *k, **kw): - return super().__call__(*k, **kw) + try: + return super().__call__(*k, **kw) + except TypeError: + sig = signature(getattr(self, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{self} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) class Node(metaclass=NodeMeta): @@ -101,6 +162,13 @@ class Node(metaclass=NodeMeta): Collector subclasses have children; Items are leaf nodes. """ + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer + #: using :attr:`path` instead. + fspath: LEGACY_PATH + # Use __slots__ to make attribute access faster. # Note that __dict__ is still available. __slots__ = ( @@ -108,7 +176,7 @@ class Node(metaclass=NodeMeta): "parent", "config", "session", - "fspath", + "path", "_nodeid", "_store", "__dict__", @@ -120,7 +188,8 @@ class Node(metaclass=NodeMeta): parent: "Optional[Node]" = None, config: Optional[Config] = None, session: "Optional[Session]" = None, - fspath: Optional[py.path.local] = None, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, nodeid: Optional[str] = None, ) -> None: #: A unique name within the scope of the parent node. @@ -129,27 +198,30 @@ class Node(metaclass=NodeMeta): #: The parent collector node. self.parent = parent - #: The pytest config object. if config: + #: The pytest config object. self.config: Config = config else: if not parent: raise TypeError("config or parent must be provided") self.config = parent.config - #: The pytest session this node is part of. if session: + #: The pytest session this node is part of. self.session = session else: if not parent: raise TypeError("session or parent must be provided") self.session = parent.session + if path is None and fspath is None: + path = getattr(parent, "path", None) #: Filesystem path where this node was collected from (can be None). - self.fspath = fspath or getattr(parent, "fspath", None) + self.path: Path = _imply_path(type(self), path, fspath=fspath) + # The explicit annotation is to avoid publicly exposing NodeKeywords. 
#: Keywords/markers collected from all scopes. - self.keywords = NodeKeywords(self) + self.keywords: MutableMapping[str, Any] = NodeKeywords(self) #: The marker objects belonging to this node. self.own_markers: List[Mark] = [] @@ -163,13 +235,15 @@ class Node(metaclass=NodeMeta): else: if not self.parent: raise TypeError("nodeid or parent must be provided") - self._nodeid = self.parent.nodeid - if self.name != "()": - self._nodeid += "::" + self.name + self._nodeid = self.parent.nodeid + "::" + self.name - # A place where plugins can store information on the node for their - # own use. Currently only intended for internal plugins. - self._store = Store() + #: A place where plugins can store information on the node for their + #: own use. + #: + #: :type: Stash + self.stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash @classmethod def from_parent(cls, parent: "Node", **kw): @@ -192,7 +266,7 @@ class Node(metaclass=NodeMeta): @property def ihook(self): """fspath-sensitive hook proxy used to call pytest hooks.""" - return self.session.gethookproxy(self.fspath) + return self.session.gethookproxy(self.path) def __repr__(self) -> str: return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) @@ -228,7 +302,10 @@ class Node(metaclass=NodeMeta): path, lineno = get_fslocation_from_item(self) assert lineno is not None warnings.warn_explicit( - warning, category=None, filename=str(path), lineno=lineno + 1, + warning, + category=None, + filename=str(path), + lineno=lineno + 1, ) # Methods for ordering nodes. @@ -357,7 +434,7 @@ class Node(metaclass=NodeMeta): from _pytest.fixtures import FixtureLookupError if isinstance(excinfo.value, ConftestImportFailure): - excinfo = ExceptionInfo(excinfo.value.excinfo) + excinfo = ExceptionInfo.from_exc_info(excinfo.value.excinfo) if isinstance(excinfo.value, fail.Exception): if not excinfo.value.pytrace: style = "value" @@ -411,21 +488,21 @@ class Node(metaclass=NodeMeta): ) -> Union[str, TerminalRepr]: """Return a representation of a collection or test failure. + .. seealso:: :ref:`non-python tests` + :param excinfo: Exception information for the failure. """ return self._repr_failure_py(excinfo, style) -def get_fslocation_from_item( - node: "Node", -) -> Tuple[Union[str, py.path.local], Optional[int]]: +def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[int]]: """Try to extract the actual location from a node, depending on available attributes: * "location": a pair (path, lineno) * "obj": a Python object that the node wraps. * "fspath": just a path - :rtype: A tuple of (str|py.path.local, int) with filename and line number. + :rtype: A tuple of (str|Path, int) with filename and line number. """ # See Item.location. 
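The new ``stash`` attribute above replaces the private ``_store``. A short plugin-side sketch, mirroring how pastebin.py uses ``config.stash`` later in this diff (the hook bodies are illustrative):

    from _pytest.stash import StashKey

    note_key = StashKey[str]()  # typed key: readers get a str back

    def pytest_runtest_setup(item):
        item.stash[note_key] = "prepared"

    def pytest_runtest_teardown(item):
        if note_key in item.stash:
            print(item.stash[note_key])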
location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None) @@ -472,59 +549,94 @@ class Collector(Node): return self._repr_failure_py(excinfo, style=tbstyle) def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: - if hasattr(self, "fspath"): + if hasattr(self, "path"): traceback = excinfo.traceback - ntraceback = traceback.cut(path=self.fspath) + ntraceback = traceback.cut(path=self.path) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() -def _check_initialpaths_for_relpath(session, fspath): +def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[str]: for initial_path in session._initialpaths: - if fspath.common(initial_path) == initial_path: - return fspath.relto(initial_path) + if commonpath(path, initial_path) == initial_path: + rel = str(path.relative_to(initial_path)) + return "" if rel == "." else rel + return None class FSCollector(Collector): def __init__( self, - fspath: py.path.local, - parent=None, + fspath: Optional[LEGACY_PATH] = None, + path_or_parent: Optional[Union[Path, Node]] = None, + path: Optional[Path] = None, + name: Optional[str] = None, + parent: Optional[Node] = None, config: Optional[Config] = None, session: Optional["Session"] = None, nodeid: Optional[str] = None, ) -> None: - name = fspath.basename - if parent is not None: - rel = fspath.relto(parent.fspath) - if rel: - name = rel - name = name.replace(os.sep, SEP) - self.fspath = fspath - - session = session or parent.session + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session if nodeid is None: - nodeid = self.fspath.relto(session.config.rootdir) + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session, path) - if not nodeid: - nodeid = _check_initialpaths_for_relpath(session, fspath) if nodeid and os.sep != SEP: nodeid = nodeid.replace(os.sep, SEP) - super().__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath) + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) @classmethod - def from_parent(cls, parent, *, fspath, **kw): + def from_parent( + cls, + parent, + *, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, + **kw, + ): """The public constructor.""" - return super().from_parent(parent=parent, fspath=fspath, **kw) + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) - def gethookproxy(self, fspath: py.path.local): + def gethookproxy(self, fspath: "os.PathLike[str]"): warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) return self.session.gethookproxy(fspath) - def isinitpath(self, path: py.path.local) -> bool: + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) return self.session.isinitpath(path) @@ -551,15 +663,64 @@ class 
Item(Node): config: Optional[Config] = None, session: Optional["Session"] = None, nodeid: Optional[str] = None, + **kw, ) -> None: - super().__init__(name, parent, config, session, nodeid=nodeid) + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) self._report_sections: List[Tuple[str, str, str]] = [] #: A list of tuples (name, value) that holds user defined properties #: for this test. self.user_properties: List[Tuple[str, object]] = [] + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ raise NotImplementedError("runtest must be implemented by Item subclass") def add_report_section(self, when: str, key: str, content: str) -> None: @@ -579,13 +740,23 @@ class Item(Node): if content: self._report_sections.append((when, key, content)) - def reportinfo(self) -> Tuple[Union[py.path.local, str], Optional[int], str]: - return self.fspath, None, "" + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. 
seealso:: :ref:`non-python tests` + """ + return self.path, None, "" @cached_property def location(self) -> Tuple[str, Optional[int], str]: location = self.reportinfo() - fspath = absolutepath(str(location[0])) - relfspath = self.session._node_location_to_relpath(fspath) + path = absolutepath(os.fspath(location[0])) + relfspath = self.session._node_location_to_relpath(path) assert type(location[2]) is str return (relfspath, location[1], location[2]) diff --git a/contrib/python/pytest/py3/_pytest/nose.py b/contrib/python/pytest/py3/_pytest/nose.py index bb8f99772a..b0699d22bd 100644 --- a/contrib/python/pytest/py3/_pytest/nose.py +++ b/contrib/python/pytest/py3/_pytest/nose.py @@ -1,39 +1,42 @@ """Run testsuites written for nose.""" -from _pytest import python -from _pytest import unittest from _pytest.config import hookimpl +from _pytest.fixtures import getfixturemarker from _pytest.nodes import Item +from _pytest.python import Function +from _pytest.unittest import TestCaseFunction @hookimpl(trylast=True) -def pytest_runtest_setup(item): - if is_potential_nosetest(item): - if not call_optional(item.obj, "setup"): - # Call module level setup if there is no object level one. - call_optional(item.parent.obj, "setup") - # XXX This implies we only call teardown when setup worked. - item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) +def pytest_runtest_setup(item: Item) -> None: + if not isinstance(item, Function): + return + # Don't do nose style setup/teardown on direct unittest style classes. + if isinstance(item, TestCaseFunction): + return + # Capture the narrowed type of item for the teardown closure, + # see https://github.com/python/mypy/issues/2608 + func = item -def teardown_nose(item): - if is_potential_nosetest(item): - if not call_optional(item.obj, "teardown"): - call_optional(item.parent.obj, "teardown") + call_optional(func.obj, "setup") + func.addfinalizer(lambda: call_optional(func.obj, "teardown")) + # NOTE: Module- and class-level fixtures are handled in python.py + # with `pluginmanager.has_plugin("nose")` checks. + # It would have been nicer to implement them outside of core, but + # it's not straightforward. -def is_potential_nosetest(item: Item) -> bool: - # Extra check needed since we do not do nose style setup/teardown - # on direct unittest style classes. - return isinstance(item, python.Function) and not isinstance( - item, unittest.TestCaseFunction - ) - -def call_optional(obj, name): +def call_optional(obj: object, name: str) -> bool: method = getattr(obj, name, None) - isfixture = hasattr(method, "_pytestfixturefunction") - if method is not None and not isfixture and callable(method): - # If there's any problems allow the exception to raise rather than - # silently ignoring them. - method() - return True + if method is None: + return False + is_fixture = getfixturemarker(method) is not None + if is_fixture: + return False + if not callable(method): + return False + # If there are any problems allow the exception to raise rather than + # silently ignoring it. 
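Returning to the Item API documented in the nodes.py hunk above: runtest() and reportinfo() are the two methods a custom (non-Python) item typically overrides, and the NodeMeta warning expects subclasses to use cooperative constructors. A minimal sketch under those contracts (class and field names are hypothetical):

    import pytest

    class YamlItem(pytest.Item):
        def __init__(self, *, spec, **kwargs):
            super().__init__(**kwargs)  # cooperative constructor
            self.spec = spec

        def runtest(self):
            if not self.spec.get("ok", False):
                raise AssertionError(f"{self.name}: spec marked as failing")

        def reportinfo(self):
            # (path, line number, display name) shown in reports
            return self.path, None, f"yaml case: {self.name}"

    # Created from a collector via the cooperative constructor, e.g.:
    #   YamlItem.from_parent(parent, name="case1", spec={"ok": True})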
+ method() + return True diff --git a/contrib/python/pytest/py3/_pytest/outcomes.py b/contrib/python/pytest/py3/_pytest/outcomes.py index 8f6203fd7f..25206fe0e8 100644 --- a/contrib/python/pytest/py3/_pytest/outcomes.py +++ b/contrib/python/pytest/py3/_pytest/outcomes.py @@ -1,6 +1,7 @@ """Exception classes and constants handling test outcomes as well as functions creating them.""" import sys +import warnings from typing import Any from typing import Callable from typing import cast @@ -8,6 +9,8 @@ from typing import Optional from typing import Type from typing import TypeVar +from _pytest.deprecated import KEYWORD_MSG_ARG + TYPE_CHECKING = False # Avoid circular import through compat. if TYPE_CHECKING: @@ -33,7 +36,7 @@ class OutcomeException(BaseException): "Perhaps you meant to use a mark?" ) raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) - BaseException.__init__(self, msg) + super().__init__(msg) self.msg = msg self.pytrace = pytrace @@ -58,9 +61,14 @@ class Skipped(OutcomeException): msg: Optional[str] = None, pytrace: bool = True, allow_module_level: bool = False, + *, + _use_item_location: bool = False, ) -> None: - OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + super().__init__(msg=msg, pytrace=pytrace) self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location class Failed(OutcomeException): @@ -105,52 +113,124 @@ def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _E @_with_exception(Exit) -def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn": +def exit( + reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None +) -> "NoReturn": """Exit testing process. - :param str msg: Message to display upon exit. - :param int returncode: Return code to be used when exiting pytest. + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. """ __tracebackhide__ = True - raise Exit(msg, returncode) + from _pytest.config import UsageError + + if reason and msg: + raise UsageError( + "cannot pass reason and msg to exit(), `msg` is deprecated, use `reason`." + ) + if not reason: + if msg is None: + raise UsageError("exit() requires a reason argument") + warnings.warn(KEYWORD_MSG_ARG.format(func="exit"), stacklevel=2) + reason = msg + raise Exit(reason, returncode) @_with_exception(Skipped) -def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn": +def skip( + reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None +) -> "NoReturn": """Skip an executing test with the given message. This function should be called only during testing (setup, call or teardown) or during collection by using the ``allow_module_level`` flag. This function can be called in doctests as well. - :param bool allow_module_level: + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: Allows this function to be called at module level, skipping the rest of the module. Defaults to False. + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + .. 
note:: It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be skipped under certain conditions like mismatching platforms or dependencies. - Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP - <https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_) + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) to skip a doctest statically. """ __tracebackhide__ = True - raise Skipped(msg=msg, allow_module_level=allow_module_level) + reason = _resolve_msg_to_reason("skip", reason, msg) + raise Skipped(msg=reason, allow_module_level=allow_module_level) @_with_exception(Failed) -def fail(msg: str = "", pytrace: bool = True) -> "NoReturn": +def fail( + reason: str = "", pytrace: bool = True, msg: Optional[str] = None +) -> "NoReturn": """Explicitly fail an executing test with the given message. - :param str msg: + :param reason: The message to show the user as reason for the failure. - :param bool pytrace: + + :param pytrace: If False, msg represents the full failure information and no python traceback will be reported. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + """ + __tracebackhide__ = True + reason = _resolve_msg_to_reason("fail", reason, msg) + raise Failed(msg=reason, pytrace=pytrace) + + +def _resolve_msg_to_reason( + func_name: str, reason: str, msg: Optional[str] = None +) -> str: + """ + Handles converting the deprecated msg parameter if provided into + reason, raising a deprecation warning. This function will be removed + when the optional msg argument is removed from here in future. + + :param str func_name: + The name of the offending function, this is formatted into the deprecation message. + + :param str reason: + The reason= passed into either pytest.fail() or pytest.skip() + + :param str msg: + The msg= passed into either pytest.fail() or pytest.skip(). This will + be converted into reason if it is provided to allow pytest.skip(msg=) or + pytest.fail(msg=) to continue working in the interim period. + + :returns: + The value to use as reason. + """ __tracebackhide__ = True - raise Failed(msg=msg, pytrace=pytrace) + if msg is not None: + + if reason: + from pytest import UsageError + + raise UsageError( + f"Passing both ``reason`` and ``msg`` to pytest.{func_name}(...) is not permitted." + ) + warnings.warn(KEYWORD_MSG_ARG.format(func=func_name), stacklevel=3) + reason = msg + return reason class XFailed(Failed): diff --git a/contrib/python/pytest/py3/_pytest/pastebin.py b/contrib/python/pytest/py3/_pytest/pastebin.py index 131873c174..385b3022cc 100644 --- a/contrib/python/pytest/py3/_pytest/pastebin.py +++ b/contrib/python/pytest/py3/_pytest/pastebin.py @@ -8,11 +8,11 @@ import pytest from _pytest.config import Config from _pytest.config import create_terminal_writer from _pytest.config.argparsing import Parser -from _pytest.store import StoreKey +from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter -pastebinfile_key = StoreKey[IO[bytes]]() +pastebinfile_key = StashKey[IO[bytes]]() def pytest_addoption(parser: Parser) -> None: @@ -37,26 +37,26 @@ def pytest_configure(config: Config) -> None: # when using pytest-xdist, for example. if tr is not None: # pastebin file will be UTF-8 encoded binary file. 
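With the outcomes.py changes above, ``reason`` is the supported argument for exit()/skip()/fail(); ``msg`` remains only for a deprecation period, and passing both raises a UsageError. Usage sketch:

    import pytest

    def test_needs_backend():
        pytest.skip(reason="backend not available in this environment")

    def test_known_broken():
        pytest.fail(reason="demonstrating an explicit failure", pytrace=False)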
- config._store[pastebinfile_key] = tempfile.TemporaryFile("w+b") + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") oldwrite = tr._tw.write def tee_write(s, **kwargs): oldwrite(s, **kwargs) if isinstance(s, str): s = s.encode("utf-8") - config._store[pastebinfile_key].write(s) + config.stash[pastebinfile_key].write(s) tr._tw.write = tee_write def pytest_unconfigure(config: Config) -> None: - if pastebinfile_key in config._store: - pastebinfile = config._store[pastebinfile_key] + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] # Get terminal contents and delete file. pastebinfile.seek(0) sessionlog = pastebinfile.read() pastebinfile.close() - del config._store[pastebinfile_key] + del config.stash[pastebinfile_key] # Undo our patching in the terminal reporter. tr = config.pluginmanager.getplugin("terminalreporter") del tr._tw.__dict__["write"] @@ -77,7 +77,7 @@ def create_new_paste(contents: Union[str, bytes]) -> str: from urllib.parse import urlencode params = {"code": contents, "lexer": "text", "expiry": "1week"} - url = "https://bpaste.net" + url = "https://bpa.st" try: response: str = ( urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") @@ -86,7 +86,7 @@ def create_new_paste(contents: Union[str, bytes]) -> str: return "bad response: %s" % exc_info m = re.search(r'href="/raw/(\w+)"', response) if m: - return "{}/show/{}".format(url, m.group(1)) + return f"{url}/show/{m.group(1)}" else: return "bad response: invalid format ('" + response + "')" diff --git a/contrib/python/pytest/py3/_pytest/pathlib.py b/contrib/python/pytest/py3/_pytest/pathlib.py index 7d9269a185..b44753e1a4 100644 --- a/contrib/python/pytest/py3/_pytest/pathlib.py +++ b/contrib/python/pytest/py3/_pytest/pathlib.py @@ -23,6 +23,7 @@ from pathlib import PurePath from posixpath import sep as posix_sep from types import ModuleType from typing import Callable +from typing import Dict from typing import Iterable from typing import Iterator from typing import Optional @@ -30,8 +31,6 @@ from typing import Set from typing import TypeVar from typing import Union -import py - from _pytest.compat import assert_never from _pytest.outcomes import skip from _pytest.warning_types import PytestWarning @@ -347,7 +346,11 @@ def cleanup_numbered_dir( def make_numbered_dir_with_cleanup( - root: Path, prefix: str, keep: int, lock_timeout: float, mode: int, + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, ) -> Path: """Create a numbered dir with a cleanup lock and remove old ones.""" e = None @@ -382,7 +385,7 @@ def resolve_from_str(input: str, rootpath: Path) -> Path: return rootpath.joinpath(input) -def fnmatch_ex(pattern: str, path) -> bool: +def fnmatch_ex(pattern: str, path: Union[str, "os.PathLike[str]"]) -> bool: """A port of FNMatcher from py.path.common which works with PurePath() instances. The difference between this algorithm and PurePath.match() is that the @@ -449,9 +452,10 @@ class ImportPathMismatchError(ImportError): def import_path( - p: Union[str, py.path.local, Path], + p: Union[str, "os.PathLike[str]"], *, mode: Union[str, ImportMode] = ImportMode.prepend, + root: Path, ) -> ModuleType: """Import and return a module from the given path, which can be a file (a module) or a directory (a package). @@ -469,19 +473,24 @@ def import_path( to import the module, which avoids having to use `__import__` and muck with `sys.path` at all. It effectively allows having same-named test modules in different places. 
+ :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + :raises ImportPathMismatchError: If after importing the given `path` and the module `__file__` are different. Only raised in `prepend` and `append` modes. """ mode = ImportMode(mode) - path = Path(str(p)) + path = Path(p) if not path.exists(): raise ImportError(path) if mode is ImportMode.importlib: - module_name = path.stem + module_name = module_name_from_path(path, root) for meta_importer in sys.meta_path: spec = meta_importer.find_spec(module_name, [str(path.parent)]) @@ -491,11 +500,11 @@ def import_path( spec = importlib.util.spec_from_file_location(module_name, str(path)) if spec is None: - raise ImportError( - "Can't find module {} at location {}".format(module_name, str(path)) - ) + raise ImportError(f"Can't find module {module_name} at location {path}") mod = importlib.util.module_from_spec(spec) + sys.modules[module_name] = mod spec.loader.exec_module(mod) # type: ignore[union-attr] + insert_missing_modules(sys.modules, module_name) return mod pkg_path = resolve_package_path(path) @@ -560,6 +569,47 @@ else: return os.path.samefile(f1, f2) +def module_name_from_path(path: Path, root: Path) -> str: + """ + Return a dotted module name based on the given path, anchored on root. + + For example: path="projects/src/tests/test_foo.py" and root="/projects", the + resulting module name will be "src.tests.test_foo". + """ + path = path.with_suffix("") + try: + relative_path = path.relative_to(root) + except ValueError: + # If we can't get a relative path to root, use the full path, except + # for the first part ("d:\\" or "/" depending on the platform, for example). + path_parts = path.parts[1:] + else: + # Use the parts for the relative path to the root path. + path_parts = relative_path.parts + + return ".".join(path_parts) + + +def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. + + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + while module_name: + if module_name not in modules: + module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[module_name] = module + module_parts.pop(-1) + module_name = ".".join(module_parts) + + def resolve_package_path(path: Path) -> Optional[Path]: """Return the Python package path by looking for the last directory upwards which still contains an __init__.py. @@ -578,7 +628,7 @@ def resolve_package_path(path: Path) -> Optional[Path]: def visit( - path: str, recurse: Callable[["os.DirEntry[str]"], bool] + path: Union[str, "os.PathLike[str]"], recurse: Callable[["os.DirEntry[str]"], bool] ) -> Iterator["os.DirEntry[str]"]: """Walk a directory recursively, in breadth-first order. @@ -635,6 +685,8 @@ def bestrelpath(directory: Path, dest: Path) -> str: If no such path can be determined, returns dest. """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) if dest == directory: return os.curdir # Find the longest common directory. @@ -652,3 +704,21 @@ def bestrelpath(directory: Path, dest: Path) -> str: # Forward from base to dest. 
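module_name_from_path() and insert_missing_modules() above are what let importmode=importlib register a unique dotted name in sys.modules. A worked sketch using the docstring's own example paths (POSIX-style, purely illustrative):

    import types
    from pathlib import Path
    from _pytest.pathlib import insert_missing_modules, module_name_from_path

    name = module_name_from_path(
        Path("/projects/src/tests/test_foo.py"), root=Path("/projects")
    )
    assert name == "src.tests.test_foo"

    modules = {name: types.ModuleType(name)}
    insert_missing_modules(modules, name)
    # Empty parent packages are created so __import__("src.tests.test_foo") works.
    assert set(modules) == {"src", "src.tests", "src.tests.test_foo"}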
*reldest.parts, ) + + +# Originates from py. path.local.copy(), with siginficant trims and adjustments. +# TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True) +def copytree(source: Path, target: Path) -> None: + """Recursively copy a source directory to target.""" + assert source.is_dir() + for entry in visit(source, recurse=lambda entry: not entry.is_symlink()): + x = Path(entry) + relpath = x.relative_to(source) + newx = target / relpath + newx.parent.mkdir(exist_ok=True) + if x.is_symlink(): + newx.symlink_to(os.readlink(x)) + elif x.is_file(): + shutil.copyfile(x, newx) + elif x.is_dir(): + newx.mkdir(exist_ok=True) diff --git a/contrib/python/pytest/py3/_pytest/pytester.py b/contrib/python/pytest/py3/_pytest/pytester.py index 31259d1bdc..363a372744 100644 --- a/contrib/python/pytest/py3/_pytest/pytester.py +++ b/contrib/python/pytest/py3/_pytest/pytester.py @@ -20,6 +20,7 @@ from typing import Any from typing import Callable from typing import Dict from typing import Generator +from typing import IO from typing import Iterable from typing import List from typing import Optional @@ -32,8 +33,6 @@ from typing import TYPE_CHECKING from typing import Union from weakref import WeakKeyDictionary -import attr -import py from iniconfig import IniConfig from iniconfig import SectionWrapper @@ -41,6 +40,8 @@ from _pytest import timing from _pytest._code import Source from _pytest.capture import _get_multicapture from _pytest.compat import final +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config import ExitCode @@ -58,13 +59,17 @@ from _pytest.nodes import Item from _pytest.outcomes import fail from _pytest.outcomes import importorskip from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import copytree from _pytest.pathlib import make_numbered_dir from _pytest.reports import CollectReport from _pytest.reports import TestReport from _pytest.tmpdir import TempPathFactory from _pytest.warning_types import PytestWarning + if TYPE_CHECKING: + from typing_extensions import Final from typing_extensions import Literal import pexpect @@ -207,7 +212,20 @@ def get_public_names(values: Iterable[str]) -> List[str]: return [x for x in values if x[0] != "_"] -class ParsedCall: +@final +class RecordedHookCall: + """A recorded call to a hook. + + The arguments to the hook call are set as attributes. + For example: + + .. code-block:: python + + calls = hook_recorder.getcalls("pytest_runtest_setup") + # Suppose pytest_runtest_setup was called once with `item=an_item`. + assert calls[0].item is an_item + """ + def __init__(self, name: str, kwargs) -> None: self.__dict__.update(kwargs) self._name = name @@ -215,7 +233,7 @@ class ParsedCall: def __repr__(self) -> str: d = self.__dict__.copy() del d["_name"] - return f"<ParsedCall {self._name!r}(**{d!r})>" + return f"<RecordedHookCall {self._name!r}(**{d!r})>" if TYPE_CHECKING: # The class has undetermined attributes, this tells mypy about it. @@ -223,20 +241,27 @@ class ParsedCall: ... +@final class HookRecorder: """Record all hooks called in a plugin manager. + Hook recorders are created by :class:`Pytester`. + This wraps all the hook calls in the plugin manager, recording each call before propagating the normal calls. 
""" - def __init__(self, pluginmanager: PytestPluginManager) -> None: + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._pluginmanager = pluginmanager - self.calls: List[ParsedCall] = [] + self.calls: List[RecordedHookCall] = [] self.ret: Optional[Union[int, ExitCode]] = None def before(hook_name: str, hook_impls, kwargs) -> None: - self.calls.append(ParsedCall(hook_name, kwargs)) + self.calls.append(RecordedHookCall(hook_name, kwargs)) def after(outcome, hook_name: str, hook_impls, kwargs) -> None: pass @@ -246,7 +271,8 @@ class HookRecorder: def finish_recording(self) -> None: self._undo_wrapping() - def getcalls(self, names: Union[str, Iterable[str]]) -> List[ParsedCall]: + def getcalls(self, names: Union[str, Iterable[str]]) -> List[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" if isinstance(names, str): names = names.split() return [call for call in self.calls if call._name in names] @@ -272,7 +298,7 @@ class HookRecorder: else: fail(f"could not find {name!r} check {check!r}") - def popcall(self, name: str) -> ParsedCall: + def popcall(self, name: str) -> RecordedHookCall: __tracebackhide__ = True for i, call in enumerate(self.calls): if call._name == name: @@ -282,7 +308,7 @@ class HookRecorder: lines.extend([" %s" % x for x in self.calls]) fail("\n".join(lines)) - def getcall(self, name: str) -> ParsedCall: + def getcall(self, name: str) -> RecordedHookCall: values = self.getcalls(name) assert len(values) == 1, (name, values) return values[0] @@ -291,13 +317,15 @@ class HookRecorder: @overload def getreports( - self, names: "Literal['pytest_collectreport']", + self, + names: "Literal['pytest_collectreport']", ) -> Sequence[CollectReport]: ... @overload def getreports( - self, names: "Literal['pytest_runtest_logreport']", + self, + names: "Literal['pytest_runtest_logreport']", ) -> Sequence[TestReport]: ... @@ -354,13 +382,15 @@ class HookRecorder: @overload def getfailures( - self, names: "Literal['pytest_collectreport']", + self, + names: "Literal['pytest_collectreport']", ) -> Sequence[CollectReport]: ... @overload def getfailures( - self, names: "Literal['pytest_runtest_logreport']", + self, + names: "Literal['pytest_runtest_logreport']", ) -> Sequence[TestReport]: ... @@ -419,7 +449,10 @@ class HookRecorder: outcomes = self.listoutcomes() assertoutcome( - outcomes, passed=passed, skipped=skipped, failed=failed, + outcomes, + passed=passed, + skipped=skipped, + failed=failed, ) def clear(self) -> None: @@ -459,17 +492,6 @@ def pytester(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> "Pyt @fixture -def testdir(pytester: "Pytester") -> "Testdir": - """ - Identical to :fixture:`pytester`, and provides an instance whose methods return - legacy ``py.path.local`` objects instead when applicable. - - New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. 
- """ - return Testdir(pytester, _ispytest=True) - - -@fixture def _sys_snapshot() -> Generator[None, None, None]: snappaths = SysPathsSnapshot() snapmods = SysModulesSnapshot() @@ -493,8 +515,9 @@ rex_session_duration = re.compile(r"\d+\.\d\ds") rex_outcome = re.compile(r"(\d+) (\w+)") +@final class RunResult: - """The result of running a command.""" + """The result of running a command from :class:`~pytest.Pytester`.""" def __init__( self, @@ -513,13 +536,13 @@ class RunResult: self.errlines = errlines """List of lines captured from stderr.""" self.stdout = LineMatcher(outlines) - """:class:`LineMatcher` of stdout. + """:class:`~pytest.LineMatcher` of stdout. - Use e.g. :func:`str(stdout) <LineMatcher.__str__()>` to reconstruct stdout, or the commonly used - :func:`stdout.fnmatch_lines() <LineMatcher.fnmatch_lines()>` method. + Use e.g. :func:`str(stdout) <pytest.LineMatcher.__str__()>` to reconstruct stdout, or the commonly used + :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines()>` method. """ self.stderr = LineMatcher(errlines) - """:class:`LineMatcher` of stderr.""" + """:class:`~pytest.LineMatcher` of stderr.""" self.duration = duration """Duration in seconds.""" @@ -573,9 +596,15 @@ class RunResult: errors: int = 0, xpassed: int = 0, xfailed: int = 0, + warnings: Optional[int] = None, + deselected: Optional[int] = None, ) -> None: - """Assert that the specified outcomes appear with the respective - numbers (0 means it didn't occur) in the text output from a test run.""" + """ + Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. + + ``warnings`` and ``deselected`` are only checked if not None. + """ __tracebackhide__ = True from _pytest.pytester_assertions import assert_outcomes @@ -588,6 +617,8 @@ class RunResult: errors=errors, xpassed=xpassed, xfailed=xfailed, + warnings=warnings, + deselected=deselected, ) @@ -643,7 +674,7 @@ class Pytester: __test__ = False - CLOSE_STDIN = object + CLOSE_STDIN: "Final" = NOTSET class TimeoutExpired(Exception): pass @@ -659,7 +690,7 @@ class Pytester: self._request = request self._mod_collections: WeakKeyDictionary[ Collector, List[Union[Item, Collector]] - ] = (WeakKeyDictionary()) + ] = WeakKeyDictionary() if request.function: name: str = request.function.__name__ else: @@ -723,7 +754,7 @@ class Pytester: def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: """Create a new :py:class:`HookRecorder` for a PluginManager.""" - pluginmanager.reprec = reprec = HookRecorder(pluginmanager) + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) self._request.addfinalizer(reprec.finish_recording) return reprec @@ -743,6 +774,11 @@ class Pytester: ) -> Path: items = list(files.items()) + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + def to_text(s: Union[Any, bytes]) -> str: return s.decode(encoding) if isinstance(s, bytes) else str(s) @@ -764,7 +800,7 @@ class Pytester: return ret def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: - r"""Create new file(s) in the test directory. + r"""Create new text file(s) in the test directory. :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`. @@ -784,6 +820,12 @@ class Pytester: pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. 
code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") """ return self._makefile(ext, args, kwargs) @@ -850,7 +892,7 @@ class Pytester: def syspathinsert( self, path: Optional[Union[str, "os.PathLike[str]"]] = None ) -> None: - """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + """Prepend a directory to sys.path, defaults to :attr:`path`. This is undone automatically when this object dies at the end of each test. @@ -887,7 +929,7 @@ class Pytester: example_dir = self._request.config.getini("pytester_example_dir") if example_dir is None: raise ValueError("pytester_example_dir is unset, can't copy examples") - example_dir = Path(str(self._request.config.rootdir)) / example_dir + example_dir = self._request.config.rootpath / example_dir for extra_element in self._request.node.iter_markers("pytester_example_path"): assert extra_element.args @@ -910,10 +952,7 @@ class Pytester: example_path = example_dir.joinpath(name) if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): - # TODO: py.path.local.copy can copy files to existing directories, - # while with shutil.copytree the destination directory cannot exist, - # we will need to roll our own in order to drop py.path.local completely - py.path.local(example_path).copy(py.path.local(self.path)) + copytree(example_path, self.path) return self.path elif example_path.is_file(): result = self.path.joinpath(example_path.name) @@ -924,22 +963,20 @@ class Pytester: f'example "{example_path}" is not found as a file or directory' ) - Session = Session - def getnode( self, config: Config, arg: Union[str, "os.PathLike[str]"] ) -> Optional[Union[Collector, Item]]: """Return the collection node of a file. - :param _pytest.config.Config config: + :param pytest.Config config: A pytest config. See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. - :param py.path.local arg: + :param os.PathLike[str] arg: Path to the file. """ session = Session.from_config(config) assert "::" not in str(arg) - p = py.path.local(arg) + p = Path(os.path.abspath(arg)) config.hook.pytest_sessionstart(session=session) res = session.perform_collect([str(p)], genitems=False)[0] config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) @@ -951,12 +988,12 @@ class Pytester: This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to create the (configured) pytest Config instance. - :param py.path.local path: Path to the file. + :param os.PathLike[str] path: Path to the file. """ - path = py.path.local(path) + path = Path(path) config = self.parseconfigure(path) session = Session.from_config(config) - x = session.fspath.bestrelpath(path) + x = bestrelpath(session.path, path) config.hook.pytest_sessionstart(session=session) res = session.perform_collect([x], genitems=False)[0] config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) @@ -997,10 +1034,7 @@ class Pytester: for the result. :param source: The source code of the test module. - :param cmdlineargs: Any extra command line arguments to use. - - :returns: :py:class:`HookRecorder` instance of the result. """ p = self.makepyfile(source) values = list(cmdlineargs) + [p] @@ -1038,8 +1072,6 @@ class Pytester: :param no_reraise_ctrlc: Typically we reraise keyboard interrupts from the child run. If True, the KeyboardInterrupt exception is captured. - - :returns: A :py:class:`HookRecorder` instance. """ # (maybe a cpython bug?) 
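The ``warnings=`` and ``deselected=`` parameters added to assert_outcomes() earlier in this file are only checked when passed. A hedged example of a pytester-based plugin test using the new count (assumes the pytester plugin is enabled, e.g. ``pytest_plugins = ["pytester"]`` in a conftest):

    def test_reports_one_warning(pytester):
        pytester.makepyfile(
            """
            import warnings

            def test_warns():
                warnings.warn(UserWarning("heads up"))
            """
        )
        result = pytester.runpytest()
        result.assert_outcomes(passed=1, warnings=1)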
the importlib cache sometimes isn't updated # properly between file creation and inline_run (especially if imports @@ -1077,7 +1109,7 @@ class Pytester: class reprec: # type: ignore pass - reprec.ret = ret # type: ignore + reprec.ret = ret # Typically we reraise keyboard interrupts from the child run # because it's our user requesting interruption of the testing. @@ -1138,7 +1170,7 @@ class Pytester: self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any ) -> RunResult: """Run pytest inline or in a subprocess, depending on the command line - option "--runpytest" and return a :py:class:`RunResult`.""" + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" new_args = self._ensure_basetemp(args) if self._method == "inprocess": return self.runpytest_inprocess(*new_args, **kwargs) @@ -1163,7 +1195,7 @@ class Pytester: This invokes the pytest bootstrapping code in _pytest.config to create a new :py:class:`_pytest.core.PluginManager` and call the pytest_cmdline_parse hook to create a new - :py:class:`_pytest.config.Config` instance. + :py:class:`pytest.Config` instance. If :py:attr:`plugins` has been populated they should be plugin modules to be registered with the PluginManager. @@ -1183,14 +1215,16 @@ class Pytester: def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config: """Return a new pytest configured Config instance. - Returns a new :py:class:`_pytest.config.Config` instance like + Returns a new :py:class:`pytest.Config` instance like :py:meth:`parseconfig`, but also calls the pytest_configure hook. """ config = self.parseconfig(*args) config._do_configure() return config - def getitem(self, source: str, funcname: str = "test_func") -> Item: + def getitem( + self, source: Union[str, "os.PathLike[str]"], funcname: str = "test_func" + ) -> Item: """Return the test item for a test function. Writes the source to a python file and runs pytest's collection on @@ -1210,7 +1244,7 @@ class Pytester: funcname, source, items ) - def getitems(self, source: str) -> List[Item]: + def getitems(self, source: Union[str, "os.PathLike[str]"]) -> List[Item]: """Return all test items collected from the module. Writes the source to a Python file and runs pytest's collection on @@ -1220,7 +1254,11 @@ class Pytester: return self.genitems([modcol]) def getmodulecol( - self, source: Union[str, Path], configargs=(), *, withinit: bool = False + self, + source: Union[str, "os.PathLike[str]"], + configargs=(), + *, + withinit: bool = False, ): """Return the module collection node for ``source``. @@ -1238,7 +1276,7 @@ class Pytester: Whether to also write an ``__init__.py`` file to the same directory to ensure it is a package. """ - if isinstance(source, Path): + if isinstance(source, os.PathLike): path = self.path.joinpath(source) assert not withinit, "not supported for paths" else: @@ -1254,7 +1292,7 @@ class Pytester: ) -> Optional[Union[Item, Collector]]: """Return the collection node for name from the module collection. - Searchs a module collection node for a collection node matching the + Searches a module collection node for a collection node matching the given name. :param modcol: A module collection node; see :py:meth:`getmodulecol`. @@ -1269,16 +1307,16 @@ class Pytester: def popen( self, - cmdargs, + cmdargs: Sequence[Union[str, "os.PathLike[str]"]], stdout: Union[int, TextIO] = subprocess.PIPE, stderr: Union[int, TextIO] = subprocess.PIPE, - stdin=CLOSE_STDIN, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, **kw, ): - """Invoke subprocess.Popen. 
+ """Invoke :py:class:`subprocess.Popen`. - Calls subprocess.Popen making sure the current working directory is - in the PYTHONPATH. + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. You probably want to use :py:meth:`run` instead. """ @@ -1309,33 +1347,38 @@ class Pytester: self, *cmdargs: Union[str, "os.PathLike[str]"], timeout: Optional[float] = None, - stdin=CLOSE_STDIN, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, ) -> RunResult: """Run a command with arguments. - Run a process using subprocess.Popen saving the stdout and stderr. + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. :param cmdargs: - The sequence of arguments to pass to `subprocess.Popen()`, with path-like objects - being converted to ``str`` automatically. + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. :param timeout: The period in seconds after which to timeout and raise :py:class:`Pytester.TimeoutExpired`. :param stdin: - Optional standard input. Bytes are being send, closing - the pipe, otherwise it is passed through to ``popen``. - Defaults to ``CLOSE_STDIN``, which translates to using a pipe - (``subprocess.PIPE``) that gets closed. + Optional standard input. + + - If it is :py:attr:`CLOSE_STDIN` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. - :rtype: RunResult + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. """ __tracebackhide__ = True - # TODO: Remove type ignore in next mypy release. - # https://github.com/python/typeshed/pull/4582 - cmdargs = tuple( - os.fspath(arg) if isinstance(arg, os.PathLike) else arg for arg in cmdargs # type: ignore[misc] - ) + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) p1 = self.path.joinpath("stdout") p2 = self.path.joinpath("stderr") print("running:", *cmdargs) @@ -1394,21 +1437,17 @@ class Pytester: def _getpytestargs(self) -> Tuple[str, ...]: return sys.executable, "-mpytest" - def runpython(self, script) -> RunResult: - """Run a python script using sys.executable as interpreter. - - :rtype: RunResult - """ + def runpython(self, script: "os.PathLike[str]") -> RunResult: + """Run a python script using sys.executable as interpreter.""" return self.run(sys.executable, script) - def runpython_c(self, command): - """Run python -c "command". - - :rtype: RunResult - """ + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" return self.run(sys.executable, "-c", command) - def runpytest_subprocess(self, *args, timeout: Optional[float] = None) -> RunResult: + def runpytest_subprocess( + self, *args: Union[str, "os.PathLike[str]"], timeout: Optional[float] = None + ) -> RunResult: """Run pytest as a subprocess with given arguments. Any plugins added to the :py:attr:`plugins` list will be added using the @@ -1422,8 +1461,6 @@ class Pytester: :param timeout: The period in seconds after which to timeout and raise :py:class:`Pytester.TimeoutExpired`. 
- - :rtype: RunResult """ __tracebackhide__ = True p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) @@ -1475,7 +1512,7 @@ class LineComp: def assert_contains_lines(self, lines2: Sequence[str]) -> None: """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. - Lines are matched using :func:`LineMatcher.fnmatch_lines`. + Lines are matched using :func:`LineMatcher.fnmatch_lines <pytest.LineMatcher.fnmatch_lines>`. """ __tracebackhide__ = True val = self.stringio.getvalue() @@ -1485,217 +1522,6 @@ class LineComp: LineMatcher(lines1).fnmatch_lines(lines2) -@final -@attr.s(repr=False, str=False, init=False) -class Testdir: - """ - Similar to :class:`Pytester`, but this class works with legacy py.path.local objects instead. - - All methods just forward to an internal :class:`Pytester` instance, converting results - to `py.path.local` objects as necessary. - """ - - __test__ = False - - CLOSE_STDIN = Pytester.CLOSE_STDIN - TimeoutExpired = Pytester.TimeoutExpired - Session = Pytester.Session - - def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: - check_ispytest(_ispytest) - self._pytester = pytester - - @property - def tmpdir(self) -> py.path.local: - """Temporary directory where tests are executed.""" - return py.path.local(self._pytester.path) - - @property - def test_tmproot(self) -> py.path.local: - return py.path.local(self._pytester._test_tmproot) - - @property - def request(self): - return self._pytester._request - - @property - def plugins(self): - return self._pytester.plugins - - @plugins.setter - def plugins(self, plugins): - self._pytester.plugins = plugins - - @property - def monkeypatch(self) -> MonkeyPatch: - return self._pytester._monkeypatch - - def make_hook_recorder(self, pluginmanager) -> HookRecorder: - """See :meth:`Pytester.make_hook_recorder`.""" - return self._pytester.make_hook_recorder(pluginmanager) - - def chdir(self) -> None: - """See :meth:`Pytester.chdir`.""" - return self._pytester.chdir() - - def finalize(self) -> None: - """See :meth:`Pytester._finalize`.""" - return self._pytester._finalize() - - def makefile(self, ext, *args, **kwargs) -> py.path.local: - """See :meth:`Pytester.makefile`.""" - return py.path.local(str(self._pytester.makefile(ext, *args, **kwargs))) - - def makeconftest(self, source) -> py.path.local: - """See :meth:`Pytester.makeconftest`.""" - return py.path.local(str(self._pytester.makeconftest(source))) - - def makeini(self, source) -> py.path.local: - """See :meth:`Pytester.makeini`.""" - return py.path.local(str(self._pytester.makeini(source))) - - def getinicfg(self, source: str) -> SectionWrapper: - """See :meth:`Pytester.getinicfg`.""" - return self._pytester.getinicfg(source) - - def makepyprojecttoml(self, source) -> py.path.local: - """See :meth:`Pytester.makepyprojecttoml`.""" - return py.path.local(str(self._pytester.makepyprojecttoml(source))) - - def makepyfile(self, *args, **kwargs) -> py.path.local: - """See :meth:`Pytester.makepyfile`.""" - return py.path.local(str(self._pytester.makepyfile(*args, **kwargs))) - - def maketxtfile(self, *args, **kwargs) -> py.path.local: - """See :meth:`Pytester.maketxtfile`.""" - return py.path.local(str(self._pytester.maketxtfile(*args, **kwargs))) - - def syspathinsert(self, path=None) -> None: - """See :meth:`Pytester.syspathinsert`.""" - return self._pytester.syspathinsert(path) - - def mkdir(self, name) -> py.path.local: - """See :meth:`Pytester.mkdir`.""" - return py.path.local(str(self._pytester.mkdir(name))) - - def 
mkpydir(self, name) -> py.path.local: - """See :meth:`Pytester.mkpydir`.""" - return py.path.local(str(self._pytester.mkpydir(name))) - - def copy_example(self, name=None) -> py.path.local: - """See :meth:`Pytester.copy_example`.""" - return py.path.local(str(self._pytester.copy_example(name))) - - def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]: - """See :meth:`Pytester.getnode`.""" - return self._pytester.getnode(config, arg) - - def getpathnode(self, path): - """See :meth:`Pytester.getpathnode`.""" - return self._pytester.getpathnode(path) - - def genitems(self, colitems: List[Union[Item, Collector]]) -> List[Item]: - """See :meth:`Pytester.genitems`.""" - return self._pytester.genitems(colitems) - - def runitem(self, source): - """See :meth:`Pytester.runitem`.""" - return self._pytester.runitem(source) - - def inline_runsource(self, source, *cmdlineargs): - """See :meth:`Pytester.inline_runsource`.""" - return self._pytester.inline_runsource(source, *cmdlineargs) - - def inline_genitems(self, *args): - """See :meth:`Pytester.inline_genitems`.""" - return self._pytester.inline_genitems(*args) - - def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): - """See :meth:`Pytester.inline_run`.""" - return self._pytester.inline_run( - *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc - ) - - def runpytest_inprocess(self, *args, **kwargs) -> RunResult: - """See :meth:`Pytester.runpytest_inprocess`.""" - return self._pytester.runpytest_inprocess(*args, **kwargs) - - def runpytest(self, *args, **kwargs) -> RunResult: - """See :meth:`Pytester.runpytest`.""" - return self._pytester.runpytest(*args, **kwargs) - - def parseconfig(self, *args) -> Config: - """See :meth:`Pytester.parseconfig`.""" - return self._pytester.parseconfig(*args) - - def parseconfigure(self, *args) -> Config: - """See :meth:`Pytester.parseconfigure`.""" - return self._pytester.parseconfigure(*args) - - def getitem(self, source, funcname="test_func"): - """See :meth:`Pytester.getitem`.""" - return self._pytester.getitem(source, funcname) - - def getitems(self, source): - """See :meth:`Pytester.getitems`.""" - return self._pytester.getitems(source) - - def getmodulecol(self, source, configargs=(), withinit=False): - """See :meth:`Pytester.getmodulecol`.""" - return self._pytester.getmodulecol( - source, configargs=configargs, withinit=withinit - ) - - def collect_by_name( - self, modcol: Collector, name: str - ) -> Optional[Union[Item, Collector]]: - """See :meth:`Pytester.collect_by_name`.""" - return self._pytester.collect_by_name(modcol, name) - - def popen( - self, - cmdargs, - stdout: Union[int, TextIO] = subprocess.PIPE, - stderr: Union[int, TextIO] = subprocess.PIPE, - stdin=CLOSE_STDIN, - **kw, - ): - """See :meth:`Pytester.popen`.""" - return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) - - def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: - """See :meth:`Pytester.run`.""" - return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) - - def runpython(self, script) -> RunResult: - """See :meth:`Pytester.runpython`.""" - return self._pytester.runpython(script) - - def runpython_c(self, command): - """See :meth:`Pytester.runpython_c`.""" - return self._pytester.runpython_c(command) - - def runpytest_subprocess(self, *args, timeout=None) -> RunResult: - """See :meth:`Pytester.runpytest_subprocess`.""" - return self._pytester.runpytest_subprocess(*args, timeout=timeout) - - def spawn_pytest( - self, string: str, expect_timeout: 
float = 10.0 - ) -> "pexpect.spawn": - """See :meth:`Pytester.spawn_pytest`.""" - return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) - - def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": - """See :meth:`Pytester.spawn`.""" - return self._pytester.spawn(cmd, expect_timeout=expect_timeout) - - def __repr__(self) -> str: - return f"<Testdir {self.tmpdir!r}>" - - def __str__(self) -> str: - return str(self.tmpdir) - - class LineMatcher: """Flexible matching of text. @@ -1827,7 +1653,7 @@ class LineMatcher: Match lines consecutively? """ if not isinstance(lines2, collections.abc.Sequence): - raise TypeError("invalid type for lines2: {}".format(type(lines2).__name__)) + raise TypeError(f"invalid type for lines2: {type(lines2).__name__}") lines2 = self._getlines(lines2) lines1 = self.lines[:] extralines = [] diff --git a/contrib/python/pytest/py3/_pytest/pytester_assertions.py b/contrib/python/pytest/py3/_pytest/pytester_assertions.py index 630c1d3331..657e4db5fc 100644 --- a/contrib/python/pytest/py3/_pytest/pytester_assertions.py +++ b/contrib/python/pytest/py3/_pytest/pytester_assertions.py @@ -4,6 +4,7 @@ # hence cannot be subject to assertion rewriting, which requires a # module to not be already imported. from typing import Dict +from typing import Optional from typing import Sequence from typing import Tuple from typing import Union @@ -42,6 +43,8 @@ def assert_outcomes( errors: int = 0, xpassed: int = 0, xfailed: int = 0, + warnings: Optional[int] = None, + deselected: Optional[int] = None, ) -> None: """Assert that the specified outcomes appear with the respective numbers (0 means it didn't occur) in the text output from a test run.""" @@ -63,4 +66,10 @@ def assert_outcomes( "xpassed": xpassed, "xfailed": xfailed, } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected assert obtained == expected diff --git a/contrib/python/pytest/py3/_pytest/python.py b/contrib/python/pytest/py3/_pytest/python.py index f1a47d7d33..eed95b65cc 100644 --- a/contrib/python/pytest/py3/_pytest/python.py +++ b/contrib/python/pytest/py3/_pytest/python.py @@ -10,6 +10,7 @@ import warnings from collections import Counter from collections import defaultdict from functools import partial +from pathlib import Path from typing import Any from typing import Callable from typing import Dict @@ -19,14 +20,14 @@ from typing import Iterator from typing import List from typing import Mapping from typing import Optional +from typing import Pattern from typing import Sequence from typing import Set from typing import Tuple -from typing import Type from typing import TYPE_CHECKING from typing import Union -import py +import attr import _pytest from _pytest import fixtures @@ -38,6 +39,7 @@ from _pytest._code.code import TerminalRepr from _pytest._io import TerminalWriter from _pytest._io.saferepr import saferepr from _pytest.compat import ascii_escaped +from _pytest.compat import assert_never from _pytest.compat import final from _pytest.compat import get_default_arg_names from _pytest.compat import get_real_func @@ -45,8 +47,8 @@ from _pytest.compat import getimfunc from _pytest.compat import getlocation from _pytest.compat import is_async_function from _pytest.compat import is_generator +from _pytest.compat import LEGACY_PATH from _pytest.compat import NOTSET -from _pytest.compat import 
REGEX_TYPE from _pytest.compat import safe_getattr from _pytest.compat import safe_isclass from _pytest.compat import STRING_TYPES @@ -54,7 +56,9 @@ from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config import hookimpl from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.deprecated import INSTANCE_COLLECTOR from _pytest.fixtures import FuncFixtureInfo from _pytest.main import Session from _pytest.mark import MARK_GEN @@ -65,16 +69,22 @@ from _pytest.mark.structures import MarkDecorator from _pytest.mark.structures import normalize_mark_list from _pytest.outcomes import fail from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import import_path from _pytest.pathlib import ImportPathMismatchError from _pytest.pathlib import parts from _pytest.pathlib import visit +from _pytest.scope import Scope from _pytest.warning_types import PytestCollectionWarning from _pytest.warning_types import PytestUnhandledCoroutineWarning if TYPE_CHECKING: from typing_extensions import Literal - from _pytest.fixtures import _Scope + from _pytest.scope import _ScopeName + + +_PYTEST_DIR = Path(_pytest.__file__).parent def pytest_addoption(parser: Parser) -> None: @@ -135,8 +145,7 @@ def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: def pytest_generate_tests(metafunc: "Metafunc") -> None: for marker in metafunc.definition.iter_markers(name="parametrize"): - # TODO: Fix this type-ignore (overlapping kwargs). - metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) # type: ignore[misc] + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) def pytest_configure(config: Config) -> None: @@ -148,14 +157,14 @@ def pytest_configure(config: Config) -> None: "or a list of tuples of values if argnames specifies multiple names. " "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " "decorated test function, one with arg1=1 and another with arg1=2." - "see https://docs.pytest.org/en/stable/parametrize.html for more info " + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " "and examples.", ) config.addinivalue_line( "markers", "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " "all of the specified fixtures. 
see " - "https://docs.pytest.org/en/stable/fixture.html#usefixtures ", + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", ) @@ -170,7 +179,7 @@ def async_warn_and_skip(nodeid: str) -> None: msg += " - pytest-trio\n" msg += " - pytest-twisted" warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) - skip(msg="async def function and no async plugin installed (see warnings)") + skip(reason="async def function and no async plugin installed (see warnings)") @hookimpl(trylast=True) @@ -186,32 +195,31 @@ def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: return True -def pytest_collect_file( - path: py.path.local, parent: nodes.Collector -) -> Optional["Module"]: - ext = path.ext - if ext == ".py": - if not parent.session.isinitpath(path): +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): if not path_matches_patterns( - path, parent.config.getini("python_files") + ["__init__.py"] + file_path, parent.config.getini("python_files") + ["__init__.py"] ): return None - ihook = parent.session.gethookproxy(path) - module: Module = ihook.pytest_pycollect_makemodule(path=path, parent=parent) + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) return module return None -def path_matches_patterns(path: py.path.local, patterns: Iterable[str]) -> bool: +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: """Return whether path matches any of the patterns in the list of globs given.""" - return any(path.fnmatch(pattern) for pattern in patterns) + return any(fnmatch_ex(pattern, path) for pattern in patterns) -def pytest_pycollect_makemodule(path: py.path.local, parent) -> "Module": - if path.basename == "__init__.py": - pkg: Package = Package.from_parent(parent, fspath=path) +def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module": + if module_path.name == "__init__.py": + pkg: Package = Package.from_parent(parent, path=module_path) return pkg - mod: Module = Module.from_parent(parent, fspath=path) + mod: Module = Module.from_parent(parent, path=module_path) return mod @@ -250,20 +258,13 @@ def pytest_pycollect_makeitem(collector: "PyCollector", name: str, obj: object): return res -class PyobjMixin: - _ALLOW_MARKERS = True - - # Function and attributes that the mixin needs (for type-checking only). - if TYPE_CHECKING: - name: str = "" - parent: Optional[nodes.Node] = None - own_markers: List[Mark] = [] +class PyobjMixin(nodes.Node): + """this mix-in inherits from Node to carry over the typing information - def getparent(self, cls: Type[nodes._NodeType]) -> Optional[nodes._NodeType]: - ... + as its intended to always mix in before a node + its position in the mro is unaffected""" - def listchain(self) -> List[nodes.Node]: - ... + _ALLOW_MARKERS = True @property def module(self): @@ -279,9 +280,13 @@ class PyobjMixin: @property def instance(self): - """Python instance object this node was collected from (can be None).""" - node = self.getparent(Instance) - return node.obj if node is not None else None + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a staticmethod, a class or a module. 
+ """ + node = self.getparent(Function) + return getattr(node.obj, "__self__", None) if node is not None else None @property def obj(self): @@ -290,7 +295,7 @@ class PyobjMixin: if obj is None: self._obj = obj = self._getobj() # XXX evil hack - # used to avoid Instance collector marker duplication + # used to avoid Function marker duplication if self._ALLOW_MARKERS: self.own_markers.extend(get_unpacked_marks(self.obj)) return obj @@ -312,8 +317,6 @@ class PyobjMixin: chain.reverse() parts = [] for node in chain: - if isinstance(node, Instance): - continue name = node.name if isinstance(node, Module): name = os.path.splitext(name)[0] @@ -325,7 +328,7 @@ class PyobjMixin: parts.reverse() return ".".join(parts) - def reportinfo(self) -> Tuple[Union[py.path.local, str], int, str]: + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: # XXX caching? obj = self.obj compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) @@ -334,13 +337,13 @@ class PyobjMixin: file_path = sys.modules[obj.__module__].__file__ if file_path.endswith(".pyc"): file_path = file_path[:-1] - fspath: Union[py.path.local, str] = file_path + path: Union["os.PathLike[str]", str] = file_path lineno = compat_co_firstlineno else: - fspath, lineno = getfslineno(obj) + path, lineno = getfslineno(obj) modpath = self.getmodpath() assert isinstance(lineno, int) - return fspath, lineno, modpath + return path, lineno, modpath # As an optimization, these builtin attribute names are pre-ignored when @@ -384,10 +387,7 @@ class PyCollector(PyobjMixin, nodes.Collector): if isinstance(obj, staticmethod): # staticmethods need to be unwrapped. obj = safe_getattr(obj, "__func__", False) - return ( - safe_getattr(obj, "__call__", False) - and fixtures.getfixturemarker(obj) is None - ) + return callable(obj) and fixtures.getfixturemarker(obj) is None else: return False @@ -413,15 +413,19 @@ class PyCollector(PyobjMixin, nodes.Collector): if not getattr(self.obj, "__test__", True): return [] - # NB. we avoid random getattrs and peek in the __dict__ instead - # (XXX originally introduced from a PyPy need, still true?) + # Avoid random getattrs and peek in the __dict__ instead. dicts = [getattr(self.obj, "__dict__", {})] - for basecls in self.obj.__class__.__mro__: - dicts.append(basecls.__dict__) + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. Since Python 3.6, + # __dict__ is definition ordered. seen: Set[str] = set() - values: List[Union[nodes.Item, nodes.Collector]] = [] + dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = [] ihook = self.ihook for dic in dicts: + values: List[Union[nodes.Item, nodes.Collector]] = [] # Note: seems like the dict can change during iteration - # be careful not to remove the list() without consideration. for name, obj in list(dic.items()): @@ -439,13 +443,14 @@ class PyCollector(PyobjMixin, nodes.Collector): values.extend(res) else: values.append(res) + dict_values.append(values) - def sort_key(item): - fspath, lineno, _ = item.reportinfo() - return (str(fspath), lineno) - - values.sort(key=sort_key) - return values + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. 
+ result = [] + for values in reversed(dict_values): + result.extend(values) + return result def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]: modulecol = self.getparent(Module) @@ -453,26 +458,32 @@ class PyCollector(PyobjMixin, nodes.Collector): module = modulecol.obj clscol = self.getparent(Class) cls = clscol and clscol.obj or None - fm = self.session._fixturemanager definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) fixtureinfo = definition._fixtureinfo + # pytest_generate_tests impls call metafunc.parametrize() which fills + # metafunc._calls, the outcome of the hook. metafunc = Metafunc( - definition, fixtureinfo, self.config, cls=cls, module=module + definition=definition, + fixtureinfo=fixtureinfo, + config=self.config, + cls=cls, + module=module, + _ispytest=True, ) methods = [] if hasattr(module, "pytest_generate_tests"): methods.append(module.pytest_generate_tests) if cls is not None and hasattr(cls, "pytest_generate_tests"): methods.append(cls().pytest_generate_tests) - self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) if not metafunc._calls: yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) else: # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs. + fm = self.session._fixturemanager fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) # Add_funcarg_pseudo_fixture_def may have shadowed some fixtures @@ -486,7 +497,6 @@ class PyCollector(PyobjMixin, nodes.Collector): self, name=subname, callspec=callspec, - callobj=funcobj, fixtureinfo=fixtureinfo, keywords={callspec.id: True}, originalname=name, @@ -512,12 +522,23 @@ class Module(nodes.File, PyCollector): Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with other fixtures (#517). """ + has_nose = self.config.pluginmanager.has_plugin("nose") setup_module = _get_first_non_fixture_func( self.obj, ("setUpModule", "setup_module") ) + if setup_module is None and has_nose: + # The name "setup" is too common - only treat as fixture if callable. + setup_module = _get_first_non_fixture_func(self.obj, ("setup",)) + if not callable(setup_module): + setup_module = None teardown_module = _get_first_non_fixture_func( self.obj, ("tearDownModule", "teardown_module") ) + if teardown_module is None and has_nose: + teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",)) + # Same as "setup" above - only treat as fixture if callable. + if not callable(teardown_module): + teardown_module = None if setup_module is None and teardown_module is None: return @@ -526,7 +547,7 @@ class Module(nodes.File, PyCollector): autouse=True, scope="module", # Use a unique name to speed up lookup. - name=f"xunit_setup_module_fixture_{self.obj.__name__}", + name=f"_xunit_setup_module_fixture_{self.obj.__name__}", ) def xunit_setup_module_fixture(request) -> Generator[None, None, None]: if setup_module is not None: @@ -555,7 +576,7 @@ class Module(nodes.File, PyCollector): autouse=True, scope="function", # Use a unique name to speed up lookup. - name=f"xunit_setup_function_fixture_{self.obj.__name__}", + name=f"_xunit_setup_function_fixture_{self.obj.__name__}", ) def xunit_setup_function_fixture(request) -> Generator[None, None, None]: if request.instance is not None: @@ -575,7 +596,7 @@ class Module(nodes.File, PyCollector): # We assume we are only called once per module. 
importmode = self.config.getoption("--import-mode") try: - mod = import_path(self.fspath, mode=importmode) + mod = import_path(self.path, mode=importmode, root=self.config.rootpath) except SyntaxError as e: raise self.CollectError( ExceptionInfo.from_current().getrepr(style="short") @@ -601,19 +622,19 @@ class Module(nodes.File, PyCollector): ) formatted_tb = str(exc_repr) raise self.CollectError( - "ImportError while importing test module '{fspath}'.\n" + "ImportError while importing test module '{path}'.\n" "Hint: make sure your test modules/packages have valid Python names.\n" "Traceback:\n" - "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + "{traceback}".format(path=self.path, traceback=formatted_tb) ) from e except skip.Exception as e: if e.allow_module_level: raise raise self.CollectError( - "Using pytest.skip outside of a test is not allowed. " - "To decorate a test function, use the @pytest.mark.skip " - "or @pytest.mark.skipif decorators instead, and to skip a " - "module use `pytestmark = pytest.mark.{skip,skipif}." + "Using pytest.skip outside of a test will skip the entire module. " + "If that's your intention, pass `allow_module_level=True`. " + "If you want to skip a specific test or an entire class, " + "use the @pytest.mark.skip or @pytest.mark.skipif decorators." ) from e self.config.pluginmanager.consider_module(mod) return mod @@ -622,20 +643,27 @@ class Module(nodes.File, PyCollector): class Package(Module): def __init__( self, - fspath: py.path.local, + fspath: Optional[LEGACY_PATH], parent: nodes.Collector, # NOTE: following args are unused: config=None, session=None, nodeid=None, + path=Optional[Path], ) -> None: # NOTE: Could be just the following, but kept as-is for compat. # nodes.FSCollector.__init__(self, fspath, parent=parent) session = parent.session nodes.FSCollector.__init__( - self, fspath, parent=parent, config=config, session=session, nodeid=nodeid + self, + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, ) - self.name = os.path.basename(str(fspath.dirname)) + self.name = self.path.parent.name def setup(self) -> None: # Not using fixtures to call setup_module here because autouse fixtures @@ -653,69 +681,69 @@ class Package(Module): func = partial(_call_with_optional_argument, teardown_module, self.obj) self.addfinalizer(func) - def gethookproxy(self, fspath: py.path.local): + def gethookproxy(self, fspath: "os.PathLike[str]"): warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) return self.session.gethookproxy(fspath) - def isinitpath(self, path: py.path.local) -> bool: + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) return self.session.isinitpath(path) def _recurse(self, direntry: "os.DirEntry[str]") -> bool: if direntry.name == "__pycache__": return False - path = py.path.local(direntry.path) - ihook = self.session.gethookproxy(path.dirpath()) - if ihook.pytest_ignore_collect(path=path, config=self.config): + fspath = Path(direntry.path) + ihook = self.session.gethookproxy(fspath.parent) + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): return False norecursepatterns = self.config.getini("norecursedirs") - if any(path.check(fnmatch=pat) for pat in norecursepatterns): + if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns): return False return True def _collectfile( - self, path: py.path.local, handle_dupes: bool = True + self, fspath: Path, 
handle_dupes: bool = True ) -> Sequence[nodes.Collector]: assert ( - path.isfile() + fspath.is_file() ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( - path, path.isdir(), path.exists(), path.islink() + fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() ) - ihook = self.session.gethookproxy(path) - if not self.session.isinitpath(path): - if ihook.pytest_ignore_collect(path=path, config=self.config): + ihook = self.session.gethookproxy(fspath) + if not self.session.isinitpath(fspath): + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): return () if handle_dupes: keepduplicates = self.config.getoption("keepduplicates") if not keepduplicates: duplicate_paths = self.config.pluginmanager._duplicatepaths - if path in duplicate_paths: + if fspath in duplicate_paths: return () else: - duplicate_paths.add(path) + duplicate_paths.add(fspath) - return ihook.pytest_collect_file(path=path, parent=self) # type: ignore[no-any-return] + return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: - this_path = self.fspath.dirpath() - init_module = this_path.join("__init__.py") - if init_module.check(file=1) and path_matches_patterns( + this_path = self.path.parent + init_module = this_path / "__init__.py" + if init_module.is_file() and path_matches_patterns( init_module, self.config.getini("python_files") ): - yield Module.from_parent(self, fspath=init_module) - pkg_prefixes: Set[py.path.local] = set() + yield Module.from_parent(self, path=init_module) + pkg_prefixes: Set[Path] = set() for direntry in visit(str(this_path), recurse=self._recurse): - path = py.path.local(direntry.path) + path = Path(direntry.path) # We will visit our own __init__.py file, in which case we skip it. if direntry.is_file(): - if direntry.name == "__init__.py" and path.dirpath() == this_path: + if direntry.name == "__init__.py" and path.parent == this_path: continue parts_ = parts(direntry.path) if any( - str(pkg_prefix) in parts_ and pkg_prefix.join("__init__.py") != path + str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path for pkg_prefix in pkg_prefixes ): continue @@ -725,7 +753,7 @@ class Package(Module): elif not direntry.is_dir(): # Broken symlink or invalid/missing file. 
continue - elif path.join("__init__.py").check(file=1): + elif path.joinpath("__init__.py").is_file(): pkg_prefixes.add(path) @@ -741,22 +769,26 @@ def _call_with_optional_argument(func, arg) -> None: func() -def _get_first_non_fixture_func(obj: object, names: Iterable[str]): +def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]: """Return the attribute from the given object to be used as a setup/teardown xunit-style function, but only if not marked as a fixture to avoid calling it twice.""" for name in names: - meth = getattr(obj, name, None) + meth: Optional[object] = getattr(obj, name, None) if meth is not None and fixtures.getfixturemarker(meth) is None: return meth + return None class Class(PyCollector): """Collector for test methods.""" @classmethod - def from_parent(cls, parent, *, name, obj=None): + def from_parent(cls, parent, *, name, obj=None, **kw): """The public constructor.""" - return super().from_parent(name=name, parent=parent) + return super().from_parent(name=name, parent=parent, **kw) + + def newinstance(self): + return self.obj() def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: if not safe_getattr(self.obj, "__test__", True): @@ -785,7 +817,9 @@ class Class(PyCollector): self._inject_setup_class_fixture() self._inject_setup_method_fixture() - return [Instance.from_parent(self, name="()")] + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + return super().collect() def _inject_setup_class_fixture(self) -> None: """Inject a hidden autouse, class scoped fixture into the collected class object @@ -803,7 +837,7 @@ class Class(PyCollector): autouse=True, scope="class", # Use a unique name to speed up lookup. - name=f"xunit_setup_class_fixture_{self.obj.__qualname__}", + name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}", ) def xunit_setup_class_fixture(cls) -> Generator[None, None, None]: if setup_class is not None: @@ -823,8 +857,17 @@ class Class(PyCollector): Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with other fixtures (#517). """ - setup_method = _get_first_non_fixture_func(self.obj, ("setup_method",)) - teardown_method = getattr(self.obj, "teardown_method", None) + has_nose = self.config.pluginmanager.has_plugin("nose") + setup_name = "setup_method" + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + if setup_method is None and has_nose: + setup_name = "setup" + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + teardown_name = "teardown_method" + teardown_method = getattr(self.obj, teardown_name, None) + if teardown_method is None and has_nose: + teardown_name = "teardown" + teardown_method = getattr(self.obj, teardown_name, None) if setup_method is None and teardown_method is None: return @@ -832,40 +875,37 @@ class Class(PyCollector): autouse=True, scope="function", # Use a unique name to speed up lookup. 
- name=f"xunit_setup_method_fixture_{self.obj.__qualname__}", + name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}", ) def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]: method = request.function if setup_method is not None: - func = getattr(self, "setup_method") + func = getattr(self, setup_name) _call_with_optional_argument(func, method) yield if teardown_method is not None: - func = getattr(self, "teardown_method") + func = getattr(self, teardown_name) _call_with_optional_argument(func, method) self.obj.__pytest_setup_method = xunit_setup_method_fixture -class Instance(PyCollector): - _ALLOW_MARKERS = False # hack, destroy later - # Instances share the object with their parents in a way - # that duplicates markers instances if not taken out - # can be removed at node structure reorganization time. +class InstanceDummy: + """Instance used to be a node type between Class and Function. It has been + removed in pytest 7.0. Some plugins exist which reference `pytest.Instance` + only to ignore it; this dummy class keeps them working. This will be removed + in pytest 8.""" - def _getobj(self): - # TODO: Improve the type of `parent` such that assert/ignore aren't needed. - assert self.parent is not None - obj = self.parent.obj # type: ignore[attr-defined] - return obj() + pass - def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: - self.session._fixturemanager.parsefactories(self) - return super().collect() - def newinstance(self): - self.obj = self._getobj() - return self.obj +# Note: module __getattr__ only works on Python>=3.7. Unfortunately +# we can't provide this deprecation warning on Python 3.6. +def __getattr__(name: str) -> object: + if name == "Instance": + warnings.warn(INSTANCE_COLLECTOR, 2) + return InstanceDummy + raise AttributeError(f"module {__name__} has no attribute {name}") def hasinit(obj: object) -> bool: @@ -883,69 +923,80 @@ def hasnew(obj: object) -> bool: @final +@attr.s(frozen=True, slots=True, auto_attribs=True) class CallSpec2: - def __init__(self, metafunc: "Metafunc") -> None: - self.metafunc = metafunc - self.funcargs: Dict[str, object] = {} - self._idlist: List[str] = [] - self.params: Dict[str, object] = {} - # Used for sorting parametrized resources. - self._arg2scopenum: Dict[str, int] = {} - self.marks: List[Mark] = [] - self.indices: Dict[str, int] = {} - - def copy(self) -> "CallSpec2": - cs = CallSpec2(self.metafunc) - cs.funcargs.update(self.funcargs) - cs.params.update(self.params) - cs.marks.extend(self.marks) - cs.indices.update(self.indices) - cs._arg2scopenum.update(self._arg2scopenum) - cs._idlist = list(self._idlist) - return cs - - def _checkargnotcontained(self, arg: str) -> None: - if arg in self.params or arg in self.funcargs: - raise ValueError(f"duplicate {arg!r}") + """A planned parameterized invocation of a test function. - def getparam(self, name: str) -> object: - try: - return self.params[name] - except KeyError as e: - raise ValueError(name) from e - - @property - def id(self) -> str: - return "-".join(map(str, self._idlist)) + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ - def setmulti2( + # arg name -> arg value which will be passed to the parametrized test + # function (direct parameterization). + funcargs: Dict[str, object] = attr.Factory(dict) + # arg name -> arg value which will be passed to a fixture of the same name + # (indirect parametrization). 
+ params: Dict[str, object] = attr.Factory(dict) + # arg name -> arg index. + indices: Dict[str, int] = attr.Factory(dict) + # Used for sorting parametrized resources. + _arg2scope: Dict[str, Scope] = attr.Factory(dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: List[str] = attr.Factory(list) + # Marks which will be applied to the item. + marks: List[Mark] = attr.Factory(list) + + def setmulti( self, + *, valtypes: Mapping[str, "Literal['params', 'funcargs']"], - argnames: Sequence[str], + argnames: Iterable[str], valset: Iterable[object], id: str, marks: Iterable[Union[Mark, MarkDecorator]], - scopenum: int, + scope: Scope, param_index: int, - ) -> None: + ) -> "CallSpec2": + funcargs = self.funcargs.copy() + params = self.params.copy() + indices = self.indices.copy() + arg2scope = self._arg2scope.copy() for arg, val in zip(argnames, valset): - self._checkargnotcontained(arg) + if arg in params or arg in funcargs: + raise ValueError(f"duplicate {arg!r}") valtype_for_arg = valtypes[arg] if valtype_for_arg == "params": - self.params[arg] = val + params[arg] = val elif valtype_for_arg == "funcargs": - self.funcargs[arg] = val - else: # pragma: no cover - assert False, f"Unhandled valtype for arg: {valtype_for_arg}" - self.indices[arg] = param_index - self._arg2scopenum[arg] = scopenum - self._idlist.append(id) - self.marks.extend(normalize_mark_list(marks)) + funcargs[arg] = val + else: + assert_never(valtype_for_arg) + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + funcargs=funcargs, + params=params, + arg2scope=arg2scope, + indices=indices, + idlist=[*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) @final class Metafunc: - """Objects passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. + """Objects passed to the :hook:`pytest_generate_tests` hook. They help to inspect a test function and to generate tests according to test configuration or values specified in the class or module where a @@ -959,11 +1010,15 @@ class Metafunc: config: Config, cls=None, module=None, + *, + _ispytest: bool = False, ) -> None: + check_ispytest(_ispytest) + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. self.definition = definition - #: Access to the :class:`_pytest.config.Config` object for the test session. + #: Access to the :class:`pytest.Config` object for the test session. self.config = config #: The module object where the test function is defined in. @@ -978,9 +1033,11 @@ class Metafunc: #: Class object where the test function is defined in or ``None``. self.cls = cls - self._calls: List[CallSpec2] = [] self._arg2fixturedefs = fixtureinfo.name2fixturedefs + # Result of parametrize(). + self._calls: List[CallSpec2] = [] + def parametrize( self, argnames: Union[str, List[str], Tuple[str, ...]], @@ -992,14 +1049,23 @@ class Metafunc: Callable[[Any], Optional[object]], ] ] = None, - scope: "Optional[_Scope]" = None, + scope: "Optional[_ScopeName]" = None, *, _param_mark: Optional[Mark] = None, ) -> None: """Add new invocations to the underlying test function using the list - of argvalues for the given argnames. Parametrization is performed - during the collection phase. 
If you need to setup expensive resources - see about setting indirect to do it rather at test setup time. + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather than at test setup time. + + Can be called multiple times, in which case each call parametrizes all + previous parametrizations, e.g. + + :: + + unparametrized: t + parametrize ["x", "y"]: t[x], t[y] + parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2] :param argnames: A comma-separated string denoting one or more argument names, or @@ -1048,8 +1114,6 @@ class Metafunc: It will also override any fixture-function defined scope, allowing to set a dynamic scope using test context or configuration. """ - from _pytest.fixtures import scope2index - argnames, parameters = ParameterSet._for_parametrize( argnames, argvalues, @@ -1065,8 +1129,12 @@ class Metafunc: pytrace=False, ) - if scope is None: - scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + if scope is not None: + scope_ = Scope.from_user( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + else: + scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) self._validate_if_using_arg_names(argnames, indirect) @@ -1086,25 +1154,20 @@ class Metafunc: if _param_mark and _param_mark._param_ids_from and generated_ids is None: object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) - scopenum = scope2index( - scope, descr=f"parametrize() call in {self.function.__name__}" - ) - # Create the new calls: if we are parametrize() multiple times (by applying the decorator # more than once) then we accumulate those calls generating the cartesian product # of all calls. newcalls = [] - for callspec in self._calls or [CallSpec2(self)]: + for callspec in self._calls or [CallSpec2()]: for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): - newcallspec = callspec.copy() - newcallspec.setmulti2( - arg_values_types, - argnames, - param_set.values, - param_id, - param_set.marks, - scopenum, - param_index, + newcallspec = callspec.setmulti( + valtypes=arg_values_types, + argnames=argnames, + valset=param_set.values, + id=param_id, + marks=param_set.marks, + scope=scope_, + param_index=param_index, ) newcalls.append(newcallspec) self._calls = newcalls @@ -1180,7 +1243,9 @@ class Metafunc: return new_ids def _resolve_arg_value_types( - self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]], + self, + argnames: Sequence[str], + indirect: Union[bool, Sequence[str]], ) -> Dict[str, "Literal['params', 'funcargs']"]: """Resolve if each parametrized argument must be considered a parameter to a fixture or a "funcarg" to the function, based on the @@ -1218,7 +1283,9 @@ class Metafunc: return valtypes def _validate_if_using_arg_names( - self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]], + self, + argnames: Sequence[str], + indirect: Union[bool, Sequence[str]], ) -> None: """Check if all argnames are being used, by default values, or directly/indirectly. @@ -1252,7 +1319,7 @@ def _find_parametrized_scope( argnames: Sequence[str], arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], indirect: Union[bool, Sequence[str]], -) -> "fixtures._Scope": +) -> Scope: """Find the most appropriate scope for a parametrized call based on its arguments. When there's at least one direct argument, always use "function" scope. 
@@ -1270,17 +1337,14 @@ def _find_parametrized_scope( if all_arguments_are_fixtures: fixturedefs = arg2fixturedefs or {} used_scopes = [ - fixturedef[0].scope + fixturedef[0]._scope for name, fixturedef in fixturedefs.items() if name in argnames ] - if used_scopes: - # Takes the most narrow scope from used fixtures. - for scope in reversed(fixtures.scopes): - if scope in used_scopes: - return scope + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) - return "function" + return Scope.Function def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str: @@ -1323,9 +1387,9 @@ def _idval( if isinstance(val, STRING_TYPES): return _ascii_escaped_by_config(val, config) - elif val is None or isinstance(val, (float, int, bool)): + elif val is None or isinstance(val, (float, int, bool, complex)): return str(val) - elif isinstance(val, REGEX_TYPE): + elif isinstance(val, Pattern): return ascii_escaped(val.pattern) elif val is NOTSET: # Fallback to default. Note that NOTSET is an enum.Enum. @@ -1416,12 +1480,22 @@ def idmaker( # Suffix non-unique IDs to make them unique. for index, test_id in enumerate(resolved_ids): if test_id_counts[test_id] > 1: - resolved_ids[index] = "{}{}".format(test_id, test_id_suffixes[test_id]) + resolved_ids[index] = f"{test_id}{test_id_suffixes[test_id]}" test_id_suffixes[test_id] += 1 return resolved_ids +def _pretty_fixture_path(func) -> str: + cwd = Path.cwd() + loc = Path(getlocation(func, str(cwd))) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(cwd, loc) + + def show_fixtures_per_test(config): from _pytest.main import wrap_session @@ -1432,27 +1506,27 @@ def _show_fixtures_per_test(config: Config, session: Session) -> None: import _pytest.config session.perform_collect() - curdir = py.path.local() + curdir = Path.cwd() tw = _pytest.config.create_terminal_writer(config) verbose = config.getvalue("verbose") - def get_best_relpath(func): + def get_best_relpath(func) -> str: loc = getlocation(func, str(curdir)) - return curdir.bestrelpath(py.path.local(loc)) + return bestrelpath(curdir, Path(loc)) def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None: argname = fixture_def.argname if verbose <= 0 and argname.startswith("_"): return - if verbose > 0: - bestrel = get_best_relpath(fixture_def.func) - funcargspec = f"{argname} -- {bestrel}" - else: - funcargspec = argname - tw.line(funcargspec, green=True) + prettypath = _pretty_fixture_path(fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") fixture_doc = inspect.getdoc(fixture_def.func) if fixture_doc: - write_docstring(tw, fixture_doc) + write_docstring( + tw, fixture_doc.split("\n\n")[0] if verbose <= 0 else fixture_doc + ) else: tw.line(" no docstring available", red=True) @@ -1465,7 +1539,7 @@ def _show_fixtures_per_test(config: Config, session: Session) -> None: tw.line() tw.sep("-", f"fixtures used by {item.name}") # TODO: Fix this type ignore. - tw.sep("-", "({})".format(get_best_relpath(item.function))) # type: ignore[attr-defined] + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] # dict key not used in loop but needed for sorting. 
for _, fixturedefs in sorted(info.name2fixturedefs.items()): assert fixturedefs is not None @@ -1488,7 +1562,7 @@ def _showfixtures_main(config: Config, session: Session) -> None: import _pytest.config session.perform_collect() - curdir = py.path.local() + curdir = Path.cwd() tw = _pytest.config.create_terminal_writer(config) verbose = config.getvalue("verbose") @@ -1510,7 +1584,7 @@ def _showfixtures_main(config: Config, session: Session) -> None: ( len(fixturedef.baseid), fixturedef.func.__module__, - curdir.bestrelpath(py.path.local(loc)), + _pretty_fixture_path(fixturedef.func), fixturedef.argname, fixturedef, ) @@ -1518,26 +1592,24 @@ def _showfixtures_main(config: Config, session: Session) -> None: available.sort() currentmodule = None - for baseid, module, bestrel, argname, fixturedef in available: + for baseid, module, prettypath, argname, fixturedef in available: if currentmodule != module: if not module.startswith("_pytest."): tw.line() tw.sep("-", f"fixtures defined from {module}") currentmodule = module - if verbose <= 0 and argname[0] == "_": + if verbose <= 0 and argname.startswith("_"): continue - tw.write(argname, green=True) + tw.write(f"{argname}", green=True) if fixturedef.scope != "function": tw.write(" [%s scope]" % fixturedef.scope, cyan=True) - if verbose > 0: - tw.write(" -- %s" % bestrel, yellow=True) + tw.write(f" -- {prettypath}", yellow=True) tw.write("\n") - loc = getlocation(fixturedef.func, str(curdir)) doc = inspect.getdoc(fixturedef.func) if doc: - write_docstring(tw, doc) + write_docstring(tw, doc.split("\n\n")[0] if verbose <= 0 else doc) else: - tw.line(f" {loc}: no docstring available", red=True) + tw.line(" no docstring available", red=True) tw.line() @@ -1549,26 +1621,26 @@ def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: class Function(PyobjMixin, nodes.Item): """An Item responsible for setting up and executing a Python test function. - param name: + :param name: The full function name, including any decorations like those added by parametrization (``my_func[my_param]``). - param parent: + :param parent: The parent Node. - param config: + :param config: The pytest Config object. - param callspec: + :param callspec: If given, this is function has been parametrized and the callspec contains meta information about the parametrization. - param callobj: + :param callobj: If given, the object which will be called when the Function is invoked, otherwise the callobj will be obtained from ``parent`` using ``originalname``. - param keywords: + :param keywords: Keywords bound to the function object for "-k" matching. - param session: + :param session: The pytest Session object. - param fixtureinfo: + :param fixtureinfo: Fixture information already resolved at this fixture node.. - param originalname: + :param originalname: The attribute name to use for accessing the underlying function object. Defaults to ``name``. 
Set this if name is different from the original name, for example when it contains decorations like those added by parametrization @@ -1615,7 +1687,7 @@ class Function(PyobjMixin, nodes.Item): # this will be redeemed later for mark in callspec.marks: # feel free to cry, this was broken for years before - # and keywords cant fix it per design + # and keywords can't fix it per design self.keywords[mark.name] = mark self.own_markers.extend(normalize_mark_list(callspec.marks)) if keywords: @@ -1656,7 +1728,12 @@ class Function(PyobjMixin, nodes.Item): def _getobj(self): assert self.parent is not None - return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined] + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. + parent_obj = self.parent.newinstance() + else: + parent_obj = self.parent.obj # type: ignore[attr-defined] + return getattr(parent_obj, self.originalname) @property def _pyfuncitem(self): @@ -1668,9 +1745,6 @@ class Function(PyobjMixin, nodes.Item): self.ihook.pytest_pyfunc_call(pyfuncitem=self) def setup(self) -> None: - if isinstance(self.parent, Instance): - self.parent.newinstance() - self.obj = self._getobj() self._request._fillfixtures() def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: @@ -1696,7 +1770,8 @@ class Function(PyobjMixin, nodes.Item): # TODO: Type ignored -- breaks Liskov Substitution. def repr_failure( # type: ignore[override] - self, excinfo: ExceptionInfo[BaseException], + self, + excinfo: ExceptionInfo[BaseException], ) -> Union[str, TerminalRepr]: style = self.config.getoption("tbstyle", "auto") if style == "auto": diff --git a/contrib/python/pytest/py3/_pytest/python_api.py b/contrib/python/pytest/py3/_pytest/python_api.py index 81ce4f8953..cb72fde1e1 100644 --- a/contrib/python/pytest/py3/_pytest/python_api.py +++ b/contrib/python/pytest/py3/_pytest/python_api.py @@ -1,7 +1,5 @@ import math import pprint -from collections.abc import Iterable -from collections.abc import Mapping from collections.abc import Sized from decimal import Decimal from numbers import Complex @@ -10,9 +8,13 @@ from typing import Any from typing import Callable from typing import cast from typing import Generic +from typing import Iterable +from typing import List +from typing import Mapping from typing import Optional from typing import overload from typing import Pattern +from typing import Sequence from typing import Tuple from typing import Type from typing import TYPE_CHECKING @@ -38,6 +40,32 @@ def _non_numeric_type_error(value, at: Optional[str]) -> TypeError: ) +def _compare_approx( + full_object: object, + message_data: Sequence[Tuple[str, str, str]], + number_of_elements: int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> List[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = [0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. 
Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + # builtin pytest.approx helper @@ -60,11 +88,24 @@ class ApproxBase: def __repr__(self) -> str: raise NotImplementedError + def _repr_compare(self, other_side: Any) -> List[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + def __eq__(self, actual) -> bool: return all( a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) ) + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + # Ignore type because of https://github.com/python/mypy/issues/4266. __hash__ = None # type: ignore @@ -72,6 +113,8 @@ class ApproxBase: return not (actual == self) def _approx_scalar(self, x) -> "ApproxScalar": + if isinstance(x, Decimal): + return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) def _yield_comparisons(self, actual): @@ -93,7 +136,7 @@ class ApproxBase: def _recursive_list_map(f, x): if isinstance(x, list): - return list(_recursive_list_map(f, xi) for xi in x) + return [_recursive_list_map(f, xi) for xi in x] else: return f(x) @@ -105,6 +148,66 @@ class ApproxNumpy(ApproxBase): list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) return f"approx({list_scalars!r})" + def _repr_compare(self, other_side: "ndarray") -> List[str]: + import itertools + import math + + def get_value_from_nested_list( + nested_list: List[Any], nd_index: Tuple[Any, ...] + ) -> Any: + """ + Helper function to get the value out of a nested list, given an n-dimensional index. + This mimics numpy's indexing, but for raw nested python lists. 
+ """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_list = _recursive_list_map( + self._approx_scalar, self.expected.tolist() + ) + + if np_array_shape != other_side.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_list, index) + other_value = get_value_from_nested_list(other_side, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side, index)), + str(get_value_from_nested_list(approx_side_as_list, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + def __eq__(self, actual) -> bool: import numpy as np @@ -119,7 +222,7 @@ class ApproxNumpy(ApproxBase): if not np.isscalar(actual) and actual.shape != self.expected.shape: return False - return ApproxBase.__eq__(self, actual) + return super().__eq__(actual) def _yield_comparisons(self, actual): import numpy as np @@ -145,6 +248,44 @@ class ApproxMapping(ApproxBase): {k: self._approx_scalar(v) for k, v in self.expected.items()} ) + def _repr_compare(self, other_side: Mapping[object, float]) -> List[str]: + import math + + approx_side_as_map = { + k: self._approx_scalar(v) for k, v in self.expected.items() + } + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for (approx_key, approx_value), other_value in zip( + approx_side_as_map.items(), other_side.values() + ): + if approx_value != other_value: + max_abs_diff = max( + max_abs_diff, abs(approx_value.expected - other_value) + ) + max_rel_diff = max( + max_rel_diff, + abs((approx_value.expected - other_value) / approx_value.expected), + ) + different_ids.append(approx_key) + + message_data = [ + (str(key), str(other_side[key]), str(approx_side_as_map[key])) + for key in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + def __eq__(self, actual) -> bool: try: if set(actual.keys()) != set(self.expected.keys()): @@ -152,7 +293,7 @@ class ApproxMapping(ApproxBase): except AttributeError: return False - return ApproxBase.__eq__(self, actual) + return super().__eq__(actual) def _yield_comparisons(self, actual): for k in self.expected.keys(): @@ -177,13 +318,55 @@ class ApproxSequencelike(ApproxBase): seq_type(self._approx_scalar(x) for x in self.expected) ) + def _repr_compare(self, other_side: Sequence[float]) -> List[str]: + import math + import numpy as np + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare lists with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + approx_side_as_map = _recursive_list_map(self._approx_scalar, self.expected) + + number_of_elements = len(approx_side_as_map) + 
max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for i, (approx_value, other_value) in enumerate( + zip(approx_side_as_map, other_side) + ): + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = np.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(i) + + message_data = [ + (str(i), str(other_side[i]), str(approx_side_as_map[i])) + for i in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + def __eq__(self, actual) -> bool: try: if len(actual) != len(self.expected): return False except TypeError: return False - return ApproxBase.__eq__(self, actual) + return super().__eq__(actual) def _yield_comparisons(self, actual): return zip(actual, self.expected) @@ -210,7 +393,6 @@ class ApproxScalar(ApproxBase): For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠±180°``. """ - # Don't show a tolerance for values that aren't compared using # tolerances, i.e. non-numerics and infinities. Need to call abs to # handle complex numbers, e.g. (inf + 1j). @@ -317,7 +499,7 @@ class ApproxScalar(ApproxBase): if relative_tolerance < 0: raise ValueError( - f"relative tolerance can't be negative: {absolute_tolerance}" + f"relative tolerance can't be negative: {relative_tolerance}" ) if math.isnan(relative_tolerance): raise ValueError("relative tolerance can't be NaN.") @@ -337,14 +519,12 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: """Assert that two numbers (or two sets of numbers) are equal to each other within some tolerance. - Due to the `intricacies of floating-point arithmetic`__, numbers that we + Due to the :std:doc:`tutorial/floatingpoint`, numbers that we would intuitively expect to be equal are not always so:: >>> 0.1 + 0.2 == 0.3 False - __ https://docs.python.org/3/tutorial/floatingpoint.html - This problem is commonly encountered when writing tests, e.g. when making sure that floating-point values are what you expect them to be. One way to deal with this problem is to assert that two floating-point numbers are @@ -449,27 +629,22 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor ``b`` is a "reference value"). You have to specify an absolute tolerance if you want to compare to ``0.0`` because there is no tolerance by - default. `More information...`__ - - __ https://docs.python.org/3/library/math.html#math.isclose + default. More information: :py:func:`math.isclose`. - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference between ``a`` and ``b`` is less that the sum of the relative tolerance w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance is only calculated w.r.t. ``b``, this test is asymmetric and you can think of ``b`` as the reference value. Support for comparing sequences - is provided by ``numpy.allclose``. `More information...`__ - - __ https://numpy.org/doc/stable/reference/generated/numpy.isclose.html + is provided by :py:func:`numpy.allclose`. More information: + :std:doc:`numpy:reference/generated/numpy.isclose`. - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` are within an absolute tolerance of ``1e-7``. 
No relative tolerance is - considered and the absolute tolerance cannot be changed, so this function - is not appropriate for very large or very small numbers. Also, it's only - available in subclasses of ``unittest.TestCase`` and it's ugly because it - doesn't follow PEP8. `More information...`__ - - __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + considered , so this function is not appropriate for very large or very + small numbers. Also, it's only available in subclasses of ``unittest.TestCase`` + and it's ugly because it doesn't follow PEP8. More information: + :py:meth:`unittest.TestCase.assertAlmostEqual`. - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. @@ -478,11 +653,17 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: special case that you explicitly specify an absolute tolerance but not a relative tolerance, only the absolute tolerance is considered. + .. note:: + + ``approx`` can handle numpy arrays, but we recommend the + specialised test helpers in :std:doc:`numpy:reference/routines.testing` + if you need support for comparisons, NaNs, or ULP-based tolerances. + .. warning:: .. versionchanged:: 3.2 - In order to avoid inconsistent behavior, ``TypeError`` is + In order to avoid inconsistent behavior, :py:exc:`TypeError` is raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. The example below illustrates the problem:: @@ -492,9 +673,7 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to comparison. This is because the call hierarchy of rich comparisons - follows a fixed behavior. `More information...`__ - - __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ + follows a fixed behavior. More information: :py:meth:`object.__ge__` .. versionchanged:: 3.7.1 ``approx`` raises ``TypeError`` when it encounters a dict value or @@ -571,48 +750,46 @@ def _as_numpy_array(obj: object) -> Optional["ndarray"]: # builtin pytest.raises helper -_E = TypeVar("_E", bound=BaseException) +E = TypeVar("E", bound=BaseException) @overload def raises( - expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]] = ..., -) -> "RaisesContext[_E]": +) -> "RaisesContext[E]": ... @overload def raises( - expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any, -) -> _pytest._code.ExceptionInfo[_E]: +) -> _pytest._code.ExceptionInfo[E]: ... def raises( - expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], *args: Any, **kwargs: Any -) -> Union["RaisesContext[_E]", _pytest._code.ExceptionInfo[_E]]: + expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any +) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]: r"""Assert that a code block/function call raises ``expected_exception`` or raise a failure exception otherwise. :kwparam match: If specified, a string containing a regular expression, or a regular expression object, that is tested against the string - representation of the exception using ``re.search``. 
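A short, hedged illustration of the ``match`` behaviour described here (not taken from the diff itself): the pattern is applied with ``re.search`` against the string representation of the exception, so literal text containing regex metacharacters should be escaped first.

import re
import pytest

def test_raises_match():
    with pytest.raises(ValueError, match=r"must be \d+ or more"):
        raise ValueError("value must be 10 or more")

    # escape a literal message that contains special characters
    with pytest.raises(ValueError, match=re.escape("bad input (x)")):
        raise ValueError("bad input (x)")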
To match a literal - string that may contain `special characters`__, the pattern can - first be escaped with ``re.escape``. + representation of the exception using :py:func:`re.search`. To match a literal + string that may contain :std:ref:`special characters <re-syntax>`, the pattern can + first be escaped with :py:func:`re.escape`. - (This is only used when ``pytest.raises`` is used as a context manager, + (This is only used when :py:func:`pytest.raises` is used as a context manager, and passed through to the function otherwise. - When using ``pytest.raises`` as a function, you can use: + When using :py:func:`pytest.raises` as a function, you can use: ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) - __ https://docs.python.org/3/library/re.html#regular-expression-syntax - .. currentmodule:: _pytest._code Use ``pytest.raises`` as a context manager, which will capture the exception of the given @@ -709,11 +886,11 @@ def raises( __tracebackhide__ = True if isinstance(expected_exception, type): - excepted_exceptions: Tuple[Type[_E], ...] = (expected_exception,) + excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,) else: excepted_exceptions = expected_exception for exc in excepted_exceptions: - if not isinstance(exc, type) or not issubclass(exc, BaseException): # type: ignore[unreachable] + if not isinstance(exc, type) or not issubclass(exc, BaseException): msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable] not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__ raise TypeError(msg.format(not_a)) @@ -731,9 +908,7 @@ def raises( else: func = args[0] if not callable(func): - raise TypeError( - "{!r} object (type: {}) must be callable".format(func, type(func)) - ) + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") try: func(*args[1:], **kwargs) except expected_exception as e: @@ -750,19 +925,19 @@ raises.Exception = fail.Exception # type: ignore @final -class RaisesContext(Generic[_E]): +class RaisesContext(Generic[E]): def __init__( self, - expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], + expected_exception: Union[Type[E], Tuple[Type[E], ...]], message: str, match_expr: Optional[Union[str, Pattern[str]]] = None, ) -> None: self.expected_exception = expected_exception self.message = message self.match_expr = match_expr - self.excinfo: Optional[_pytest._code.ExceptionInfo[_E]] = None + self.excinfo: Optional[_pytest._code.ExceptionInfo[E]] = None - def __enter__(self) -> _pytest._code.ExceptionInfo[_E]: + def __enter__(self) -> _pytest._code.ExceptionInfo[E]: self.excinfo = _pytest._code.ExceptionInfo.for_later() return self.excinfo @@ -779,7 +954,7 @@ class RaisesContext(Generic[_E]): if not issubclass(exc_type, self.expected_exception): return False # Cast to narrow the exception type now that it's verified. 
- exc_info = cast(Tuple[Type[_E], _E, TracebackType], (exc_type, exc_val, exc_tb)) + exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb)) self.excinfo.fill_unfilled(exc_info) if self.match_expr is not None: self.excinfo.match(self.match_expr) diff --git a/contrib/python/pytest/py3/_pytest/python_path.py b/contrib/python/pytest/py3/_pytest/python_path.py new file mode 100644 index 0000000000..cceabbca12 --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/python_path.py @@ -0,0 +1,24 @@ +import sys + +import pytest +from pytest import Config +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[]) + + +@pytest.hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(early_config.getini("pythonpath")): + sys.path.insert(0, str(path)) + + +@pytest.hookimpl(trylast=True) +def pytest_unconfigure(config: Config) -> None: + for path in config.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) diff --git a/contrib/python/pytest/py3/_pytest/recwarn.py b/contrib/python/pytest/py3/_pytest/recwarn.py index d872d9da40..175b571a80 100644 --- a/contrib/python/pytest/py3/_pytest/recwarn.py +++ b/contrib/python/pytest/py3/_pytest/recwarn.py @@ -17,6 +17,7 @@ from typing import Union from _pytest.compat import final from _pytest.deprecated import check_ispytest +from _pytest.deprecated import WARNS_NONE_ARG from _pytest.fixtures import fixture from _pytest.outcomes import fail @@ -28,7 +29,7 @@ T = TypeVar("T") def recwarn() -> Generator["WarningsRecorder", None, None]: """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - See http://docs.python.org/library/warnings.html for information + See https://docs.python.org/library/how-to/capture-warnings.html for information on warning categories. """ wrec = WarningsRecorder(_ispytest=True) @@ -83,7 +84,7 @@ def deprecated_call( @overload def warns( - expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = ..., *, match: Optional[Union[str, Pattern[str]]] = ..., ) -> "WarningsChecker": @@ -92,7 +93,7 @@ def warns( @overload def warns( - expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]], func: Callable[..., T], *args: Any, **kwargs: Any, @@ -101,7 +102,7 @@ def warns( def warns( - expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]], + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = Warning, *args: Any, match: Optional[Union[str, Pattern[str]]] = None, **kwargs: Any, @@ -135,7 +136,7 @@ def warns( ... warnings.warn("this is not here", UserWarning) Traceback (most recent call last): ... - Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... 
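A minimal sketch of ``pytest.warns`` consistent with the signature change above, where ``expected_warning`` now defaults to ``Warning`` and passing ``None`` is deprecated via ``WARNS_NONE_ARG`` (illustrative only, not part of the diff):

import warnings
import pytest

def test_warns_examples():
    with pytest.warns(UserWarning, match="deprecated"):
        warnings.warn("this API is deprecated", UserWarning)

    # with the Warning default, any emitted warning satisfies the check
    with pytest.warns(Warning):
        warnings.warn("something noteworthy happened")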
""" __tracebackhide__ = True @@ -149,9 +150,7 @@ def warns( else: func = args[0] if not callable(func): - raise TypeError( - "{!r} object (type: {}) must be callable".format(func, type(func)) - ) + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") with WarningsChecker(expected_warning, _ispytest=True): return func(*args[1:], **kwargs) @@ -234,7 +233,7 @@ class WarningsChecker(WarningsRecorder): self, expected_warning: Optional[ Union[Type[Warning], Tuple[Type[Warning], ...]] - ] = None, + ] = Warning, match_expr: Optional[Union[str, Pattern[str]]] = None, *, _ispytest: bool = False, @@ -244,6 +243,7 @@ class WarningsChecker(WarningsRecorder): msg = "exceptions must be derived from Warning, not %s" if expected_warning is None: + warnings.warn(WARNS_NONE_ARG, stacklevel=4) expected_warning_tup = None elif isinstance(expected_warning, tuple): for exc in expected_warning: @@ -274,7 +274,7 @@ class WarningsChecker(WarningsRecorder): if not any(issubclass(r.category, self.expected_warning) for r in self): __tracebackhide__ = True fail( - "DID NOT WARN. No warnings of type {} was emitted. " + "DID NOT WARN. No warnings of type {} were emitted. " "The list of emitted warnings is: {}.".format( self.expected_warning, [each.message for each in self] ) @@ -287,7 +287,7 @@ class WarningsChecker(WarningsRecorder): else: fail( "DID NOT WARN. No warnings of type {} matching" - " ('{}') was emitted. The list of emitted warnings" + " ('{}') were emitted. The list of emitted warnings" " is: {}.".format( self.expected_warning, self.match_expr, diff --git a/contrib/python/pytest/py3/_pytest/reports.py b/contrib/python/pytest/py3/_pytest/reports.py index 58f12517c5..a68e68bc52 100644 --- a/contrib/python/pytest/py3/_pytest/reports.py +++ b/contrib/python/pytest/py3/_pytest/reports.py @@ -1,5 +1,5 @@ +import os from io import StringIO -from pathlib import Path from pprint import pprint from typing import Any from typing import cast @@ -15,7 +15,6 @@ from typing import TypeVar from typing import Union import attr -import py from _pytest._code.code import ExceptionChainRepr from _pytest._code.code import ExceptionInfo @@ -65,6 +64,7 @@ class BaseReport: ] sections: List[Tuple[str, str]] nodeid: str + outcome: "Literal['passed', 'failed', 'skipped']" def __init__(self, **kw: Any) -> None: self.__dict__.update(kw) @@ -76,7 +76,9 @@ class BaseReport: def toterminal(self, out: TerminalWriter) -> None: if hasattr(self, "node"): - out.line(getworkerinfoline(self.node)) + worker_info = getworkerinfoline(self.node) + if worker_info: + out.line(worker_info) longrepr = self.longrepr if longrepr is None: @@ -141,12 +143,24 @@ class BaseReport: content for (prefix, content) in self.get_sections("Captured stderr") ) - passed = property(lambda x: x.outcome == "passed") - failed = property(lambda x: x.outcome == "failed") - skipped = property(lambda x: x.outcome == "skipped") + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" @property def fspath(self) -> str: + """The path portion of the reported node, as a string.""" return self.nodeid.split("::")[0] @property @@ -229,7 +243,10 @@ def _report_unserialization_failure( @final class TestReport(BaseReport): """Basic test report object (also used for setup and teardown 
calls if - they fail).""" + they fail). + + Reports can contain arbitrary extra attributes. + """ __test__ = False @@ -273,10 +290,10 @@ class TestReport(BaseReport): #: defined properties of the test. self.user_properties = list(user_properties or []) - #: List of pairs ``(str, str)`` of extra information which needs to - #: marshallable. Used by pytest to add captured text - #: from ``stdout`` and ``stderr``, but may be used by other plugins - #: to add arbitrary information to reports. + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. self.sections = list(sections) #: Time it took to run just the test. @@ -307,7 +324,7 @@ class TestReport(BaseReport): Tuple[str, int, str], str, TerminalRepr, - ] = (None) + ] = None else: if not isinstance(excinfo, ExceptionInfo): outcome = "failed" @@ -315,7 +332,12 @@ class TestReport(BaseReport): elif isinstance(excinfo.value, skip.Exception): outcome = "skipped" r = excinfo._getreprcrash() - longrepr = (str(r.path), r.lineno, r.message) + if excinfo.value._use_item_location: + path, line = item.reportinfo()[:2] + assert line is not None + longrepr = os.fspath(path), line + 1, r.message + else: + longrepr = (str(r.path), r.lineno, r.message) else: outcome = "failed" if call.when == "call": @@ -341,15 +363,20 @@ class TestReport(BaseReport): @final class CollectReport(BaseReport): - """Collection report object.""" + """Collection report object. + + Reports can contain arbitrary extra attributes. + """ when = "collect" def __init__( self, nodeid: str, - outcome: "Literal['passed', 'skipped', 'failed']", - longrepr, + outcome: "Literal['passed', 'failed', 'skipped']", + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ], result: Optional[List[Union[Item, Collector]]], sections: Iterable[Tuple[str, str]] = (), **extra, @@ -366,11 +393,10 @@ class CollectReport(BaseReport): #: The collected items and collection nodes. self.result = result or [] - #: List of pairs ``(str, str)`` of extra information which needs to - #: marshallable. - # Used by pytest to add captured text : from ``stdout`` and ``stderr``, - # but may be used by other plugins : to add arbitrary information to - # reports. + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. 
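For context on how these report attributes are typically consumed (a hedged sketch, not part of the diff): a plugin can read the ``passed``/``failed``/``skipped`` properties and append its own ``(heading, content)`` pair to ``sections`` from a ``pytest_runtest_makereport`` hook wrapper.

import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    if rep.when == "call" and rep.failed:
        # extra text shows up in the report alongside captured stdout/stderr
        rep.sections.append(("my-plugin notes", f"extra details for {item.nodeid}"))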
self.sections = list(sections) self.__dict__.update(extra) @@ -484,8 +510,8 @@ def _report_to_json(report: BaseReport) -> Dict[str, Any]: else: d["longrepr"] = report.longrepr for name in d: - if isinstance(d[name], (py.path.local, Path)): - d[name] = str(d[name]) + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) elif name == "result": d[name] = None # for now return d diff --git a/contrib/python/pytest/py3/_pytest/runner.py b/contrib/python/pytest/py3/_pytest/runner.py index 794690ddb0..e43dd2dc81 100644 --- a/contrib/python/pytest/py3/_pytest/runner.py +++ b/contrib/python/pytest/py3/_pytest/runner.py @@ -2,6 +2,7 @@ import bdb import os import sys +import warnings from typing import Callable from typing import cast from typing import Dict @@ -26,10 +27,13 @@ from _pytest._code.code import ExceptionInfo from _pytest._code.code import TerminalRepr from _pytest.compat import final from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import UNITTEST_SKIP_DURING_COLLECTION from _pytest.nodes import Collector from _pytest.nodes import Item from _pytest.nodes import Node from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException from _pytest.outcomes import Skipped from _pytest.outcomes import TEST_OUTCOME @@ -100,7 +104,7 @@ def pytest_sessionstart(session: "Session") -> None: def pytest_sessionfinish(session: "Session") -> None: - session._setupstate.teardown_all() + session._setupstate.teardown_exact(None) def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool: @@ -116,6 +120,8 @@ def runtestprotocol( ) -> List[TestReport]: hasrequest = hasattr(item, "_request") if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. item._initrequest() # type: ignore[attr-defined] rep = call_and_report(item, "setup", log) reports = [rep] @@ -147,7 +153,7 @@ def show_test_item(item: Item) -> None: def pytest_runtest_setup(item: Item) -> None: _update_current_test_var(item, "setup") - item.session._setupstate.prepare(item) + item.session._setupstate.setup(item) def pytest_runtest_call(item: Item) -> None: @@ -172,7 +178,7 @@ def pytest_runtest_call(item: Item) -> None: def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None: _update_current_test_var(item, "teardown") - item.session._setupstate.teardown_exact(item, nextitem) + item.session._setupstate.teardown_exact(nextitem) _update_current_test_var(item, None) @@ -260,34 +266,47 @@ TResult = TypeVar("TResult", covariant=True) @final -@attr.s(repr=False) +@attr.s(repr=False, init=False, auto_attribs=True) class CallInfo(Generic[TResult]): - """Result/Exception info a function invocation. - - :param T result: - The return value of the call, if it didn't raise. Can only be - accessed if excinfo is None. - :param Optional[ExceptionInfo] excinfo: - The captured exception of the call, if it raised. - :param float start: - The system time when the call started, in seconds since the epoch. - :param float stop: - The system time when the call ended, in seconds since the epoch. - :param float duration: - The call duration, in seconds. - :param str when: - The context of invocation: "setup", "call", "teardown", ... 
- """ - - _result = attr.ib(type="Optional[TResult]") - excinfo = attr.ib(type=Optional[ExceptionInfo[BaseException]]) - start = attr.ib(type=float) - stop = attr.ib(type=float) - duration = attr.ib(type=float) - when = attr.ib(type="Literal['collect', 'setup', 'call', 'teardown']") + """Result/Exception info of a function invocation.""" + + _result: Optional[TResult] + #: The captured exception of the call, if it raised. + excinfo: Optional[ExceptionInfo[BaseException]] + #: The system time when the call started, in seconds since the epoch. + start: float + #: The system time when the call ended, in seconds since the epoch. + stop: float + #: The call duration, in seconds. + duration: float + #: The context of invocation: "collect", "setup", "call" or "teardown". + when: "Literal['collect', 'setup', 'call', 'teardown']" + + def __init__( + self, + result: Optional[TResult], + excinfo: Optional[ExceptionInfo[BaseException]], + start: float, + stop: float, + duration: float, + when: "Literal['collect', 'setup', 'call', 'teardown']", + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._result = result + self.excinfo = excinfo + self.start = start + self.stop = stop + self.duration = duration + self.when = when @property def result(self) -> TResult: + """The return value of the call, if it didn't raise. + + Can only be accessed if excinfo is None. + """ if self.excinfo is not None: raise AttributeError(f"{self!r} has no valid result") # The cast is safe because an exception wasn't raised, hence @@ -304,6 +323,16 @@ class CallInfo(Generic[TResult]): Union[Type[BaseException], Tuple[Type[BaseException], ...]] ] = None, ) -> "CallInfo[TResult]": + """Call func, wrapping the result in a CallInfo. + + :param func: + The function to call. Called without arguments. + :param when: + The phase in which the function is called. + :param reraise: + Exception or exceptions that shall propagate if raised by the + function, instead of being wrapped in the CallInfo. + """ excinfo = None start = timing.time() precise_start = timing.perf_counter() @@ -325,6 +354,7 @@ class CallInfo(Generic[TResult]): when=when, result=result, excinfo=excinfo, + _ispytest=True, ) def __repr__(self) -> str: @@ -349,6 +379,11 @@ def pytest_make_collect_report(collector: Collector) -> CollectReport: # Type ignored because unittest is loaded dynamically. skip_exceptions.append(unittest.SkipTest) # type: ignore if isinstance(call.excinfo.value, tuple(skip_exceptions)): + if unittest is not None and isinstance( + call.excinfo.value, unittest.SkipTest # type: ignore[attr-defined] + ): + warnings.warn(UNITTEST_SKIP_DURING_COLLECTION, stacklevel=2) + outcome = "skipped" r_ = collector._repr_failure_py(call.excinfo, "line") assert isinstance(r_, ExceptionChainRepr), repr(r_) @@ -369,87 +404,138 @@ def pytest_make_collect_report(collector: Collector) -> CollectReport: class SetupState: - """Shared state for setting up/tearing down test items or collectors.""" + """Shared state for setting up/tearing down test items or collectors + in a session. 
- def __init__(self): - self.stack: List[Node] = [] - self._finalizers: Dict[Node, List[Callable[[], object]]] = {} + Suppose we have a collection tree as follows: - def addfinalizer(self, finalizer: Callable[[], object], colitem) -> None: - """Attach a finalizer to the given colitem.""" - assert colitem and not isinstance(colitem, tuple) - assert callable(finalizer) - # assert colitem in self.stack # some unit tests don't setup stack :/ - self._finalizers.setdefault(colitem, []).append(finalizer) + <Session session> + <Module mod1> + <Function item1> + <Module mod2> + <Function item2> - def _pop_and_teardown(self): - colitem = self.stack.pop() - self._teardown_with_finalization(colitem) + The SetupState maintains a stack. The stack starts out empty: - def _callfinalizers(self, colitem) -> None: - finalizers = self._finalizers.pop(colitem, None) - exc = None - while finalizers: - fin = finalizers.pop() - try: - fin() - except TEST_OUTCOME as e: - # XXX Only first exception will be seen by user, - # ideally all should be reported. - if exc is None: - exc = e - if exc: - raise exc + [] - def _teardown_with_finalization(self, colitem) -> None: - self._callfinalizers(colitem) - colitem.teardown() - for colitem in self._finalizers: - assert colitem in self.stack + During the setup phase of item1, setup(item1) is called. What it does + is: - def teardown_all(self) -> None: - while self.stack: - self._pop_and_teardown() - for key in list(self._finalizers): - self._teardown_with_finalization(key) - assert not self._finalizers + push session to stack, run session.setup() + push mod1 to stack, run mod1.setup() + push item1 to stack, run item1.setup() - def teardown_exact(self, item, nextitem) -> None: - needed_collectors = nextitem and nextitem.listchain() or [] - self._teardown_towards(needed_collectors) + The stack is: - def _teardown_towards(self, needed_collectors) -> None: - exc = None - while self.stack: - if self.stack == needed_collectors[: len(self.stack)]: - break - try: - self._pop_and_teardown() - except TEST_OUTCOME as e: - # XXX Only first exception will be seen by user, - # ideally all should be reported. - if exc is None: - exc = e - if exc: - raise exc + [session, mod1, item1] + + While the stack is in this shape, it is allowed to add finalizers to + each of session, mod1, item1 using addfinalizer(). + + During the teardown phase of item1, teardown_exact(item2) is called, + where item2 is the next item to item1. What it does is: + + pop item1 from stack, run its teardowns + pop mod1 from stack, run its teardowns + + mod1 was popped because it ended its purpose with item1. The stack is: - def prepare(self, colitem) -> None: - """Setup objects along the collector chain to the test-method.""" + [session] - # Check if the last collection node has raised an error. - for col in self.stack: - if hasattr(col, "_prepare_exc"): - exc = col._prepare_exc # type: ignore[attr-defined] + During the setup phase of item2, setup(item2) is called. What it does + is: + + push mod2 to stack, run mod2.setup() + push item2 to stack, run item2.setup() + + Stack: + + [session, mod2, item2] + + During the teardown phase of item2, teardown_exact(None) is called, + because item2 is the last item. What it does is: + + pop item2 from stack, run its teardowns + pop mod2 from stack, run its teardowns + pop session from stack, run its teardowns + + Stack: + + [] + + The end! + """ + + def __init__(self) -> None: + # The stack is in the dict insertion order. 
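A user-level illustration of what ``addfinalizer`` on this stack supports (hedged sketch; in practice finalizers usually reach ``SetupState.addfinalizer`` through ``Node.addfinalizer`` or fixture teardown rather than by calling ``SetupState`` directly):

import pytest

@pytest.fixture
def resource(request):
    res = {"open": True}

    def close():
        res["open"] = False

    # runs when the owning node is popped from the stack in teardown_exact()
    request.addfinalizer(close)
    return res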
+ self.stack: Dict[ + Node, + Tuple[ + # Node's finalizers. + List[Callable[[], object]], + # Node's exception, if its setup raised. + Optional[Union[OutcomeException, Exception]], + ], + ] = {} + + def setup(self, item: Item) -> None: + """Setup objects along the collector chain to the item.""" + needed_collectors = item.listchain() + + # If a collector fails its setup, fail its entire subtree of items. + # The setup is not retried for each item - the same exception is used. + for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: raise exc - needed_collectors = colitem.listchain() for col in needed_collectors[len(self.stack) :]: - self.stack.append(col) + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) try: col.setup() - except TEST_OUTCOME as e: - col._prepare_exc = e # type: ignore[attr-defined] - raise e + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], exc) + raise exc + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Optional[Item]) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = nextitem and nextitem.listchain() or [] + exc = None + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = e + if exc: + raise exc + if nextitem is None: + assert not self.stack def collect_one_node(collector: Collector) -> CollectReport: diff --git a/contrib/python/pytest/py3/_pytest/scope.py b/contrib/python/pytest/py3/_pytest/scope.py new file mode 100644 index 0000000000..7a746fb9fa --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" +from enum import Enum +from functools import total_ordering +from typing import Optional +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import Literal + + _ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. 
+ Function: "_ScopeName" = "function" + Class: "_ScopeName" = "class" + Module: "_ScopeName" = "module" + Package: "_ScopeName" = "package" + Session: "_ScopeName" = "session" + + def next_lower(self) -> "Scope": + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> "Scope": + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: "Scope") -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: "_ScopeName", descr: str, where: Optional[str] = None + ) -> "Scope": + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/contrib/python/pytest/py3/_pytest/setuponly.py b/contrib/python/pytest/py3/_pytest/setuponly.py index 44a1094c0d..531131ce72 100644 --- a/contrib/python/pytest/py3/_pytest/setuponly.py +++ b/contrib/python/pytest/py3/_pytest/setuponly.py @@ -9,6 +9,7 @@ from _pytest.config import ExitCode from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureDef from _pytest.fixtures import SubRequest +from _pytest.scope import Scope def pytest_addoption(parser: Parser) -> None: @@ -64,7 +65,9 @@ def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: tw = config.get_terminal_writer() tw.line() - tw.write(" " * 2 * fixturedef.scopenum) + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. 
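The indentation computed here relies on ``Scope`` being an ordered enum whose members are listed from lowest to highest; a brief illustrative sketch of that ordering (not taken from the diff):

from _pytest.scope import Scope

assert Scope.Function < Scope.Module < Scope.Session
# reversing the enum puts Session first, so session-scoped fixtures get indent 0
assert [s.value for s in reversed(Scope)] == [
    "session", "package", "module", "class", "function",
]
assert list(reversed(Scope)).index(Scope.Package) == 1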
+ scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) tw.write( "{step} {scope} {fixture}".format( step=msg.ljust(8), # align the output to TEARDOWN @@ -79,7 +82,7 @@ def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: tw.write(" (fixtures used: {})".format(", ".join(deps))) if hasattr(fixturedef, "cached_param"): - tw.write("[{}]".format(saferepr(fixturedef.cached_param, maxsize=42))) # type: ignore[attr-defined] + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") # type: ignore[attr-defined] tw.flush() diff --git a/contrib/python/pytest/py3/_pytest/skipping.py b/contrib/python/pytest/py3/_pytest/skipping.py index 9aacfecee7..ac7216f838 100644 --- a/contrib/python/pytest/py3/_pytest/skipping.py +++ b/contrib/python/pytest/py3/_pytest/skipping.py @@ -21,7 +21,7 @@ from _pytest.outcomes import skip from _pytest.outcomes import xfail from _pytest.reports import BaseReport from _pytest.runner import CallInfo -from _pytest.store import StoreKey +from _pytest.stash import StashKey def pytest_addoption(parser: Parser) -> None: @@ -49,7 +49,7 @@ def pytest_configure(config: Config) -> None: import pytest old = pytest.xfail - config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) def nop(*args, **kwargs): pass @@ -68,7 +68,7 @@ def pytest_configure(config: Config) -> None: "skipif(condition, ..., *, reason=...): " "skip the given test function if any of the conditions evaluate to True. " "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " - "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif", + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", ) config.addinivalue_line( "markers", @@ -78,7 +78,7 @@ def pytest_configure(config: Config) -> None: "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " - "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail", + "a true failure. 
See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", ) @@ -157,11 +157,11 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, return result, reason -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class Skip: """The result of evaluate_skip_marks().""" - reason = attr.ib(type=str) + reason: str = "unconditional skip" def evaluate_skip_marks(item: Item) -> Optional[Skip]: @@ -184,25 +184,22 @@ def evaluate_skip_marks(item: Item) -> Optional[Skip]: return Skip(reason) for mark in item.iter_markers(name="skip"): - if "reason" in mark.kwargs: - reason = mark.kwargs["reason"] - elif mark.args: - reason = mark.args[0] - else: - reason = "unconditional skip" - return Skip(reason) + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None return None -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class Xfail: """The result of evaluate_xfail_marks().""" - reason = attr.ib(type=str) - run = attr.ib(type=bool) - strict = attr.ib(type=bool) - raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]]) + reason: str + run: bool + strict: bool + raises: Optional[Tuple[Type[BaseException], ...]] def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: @@ -230,30 +227,26 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: return None -# Whether skipped due to skip or skipif marks. -skipped_by_mark_key = StoreKey[bool]() # Saves the xfail mark evaluation. Can be refreshed during call if None. -xfailed_key = StoreKey[Optional[Xfail]]() -unexpectedsuccess_key = StoreKey[str]() +xfailed_key = StashKey[Optional[Xfail]]() @hookimpl(tryfirst=True) def pytest_runtest_setup(item: Item) -> None: skipped = evaluate_skip_marks(item) - item._store[skipped_by_mark_key] = skipped is not None if skipped: - skip(skipped.reason) + raise skip.Exception(skipped.reason, _use_item_location=True) - item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) if xfailed and not item.config.option.runxfail and not xfailed.run: xfail("[NOTRUN] " + xfailed.reason) @hookimpl(hookwrapper=True) def pytest_runtest_call(item: Item) -> Generator[None, None, None]: - xfailed = item._store.get(xfailed_key, None) + xfailed = item.stash.get(xfailed_key, None) if xfailed is None: - item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) if xfailed and not item.config.option.runxfail and not xfailed.run: xfail("[NOTRUN] " + xfailed.reason) @@ -261,25 +254,17 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]: yield # The test run may have added an xfail mark dynamically. 
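The re-check after ``yield`` exists because a test can add an xfail mark while it is running; a hedged example of such a dynamically applied mark (illustrative only, not part of the diff):

import pytest

def test_dynamic_xfail(request):
    # a mark added during the call phase is picked up by the re-evaluation above
    request.node.add_marker(pytest.mark.xfail(reason="demonstrating a runtime xfail"))
    assert False  # reported as xfailed rather than failed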
- xfailed = item._store.get(xfailed_key, None) + xfailed = item.stash.get(xfailed_key, None) if xfailed is None: - item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) @hookimpl(hookwrapper=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]): outcome = yield rep = outcome.get_result() - xfailed = item._store.get(xfailed_key, None) - # unittest special case, see setting of unexpectedsuccess_key - if unexpectedsuccess_key in item._store and rep.when == "call": - reason = item._store[unexpectedsuccess_key] - if reason: - rep.longrepr = f"Unexpected success: {reason}" - else: - rep.longrepr = "Unexpected success" - rep.outcome = "failed" - elif item.config.option.runxfail: + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: pass # don't interfere elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): assert call.excinfo.value.msg is not None @@ -301,19 +286,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]): rep.outcome = "passed" rep.wasxfail = xfailed.reason - if ( - item._store.get(skipped_by_mark_key, True) - and rep.skipped - and type(rep.longrepr) is tuple - ): - # Skipped by mark.skipif; change the location of the failure - # to point to the item definition, otherwise it will display - # the location of where the skip exception was raised within pytest. - _, _, reason = rep.longrepr - filename, line = item.reportinfo()[:2] - assert line is not None - rep.longrepr = str(filename), line + 1, reason - def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: if hasattr(report, "wasxfail"): diff --git a/contrib/python/pytest/py3/_pytest/stash.py b/contrib/python/pytest/py3/_pytest/stash.py new file mode 100644 index 0000000000..e61d75b95f --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/stash.py @@ -0,0 +1,112 @@ +from typing import Any +from typing import cast +from typing import Dict +from typing import Generic +from typing import TypeVar +from typing import Union + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. + + Usually you will be given an object which has a ``Stash``, for example + :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`: + + .. code-block:: python + + stash: Stash = some_object.stash + + If a module or plugin wants to store data in this ``Stash``, it creates + :class:`StashKey`\s for its keys (at the module level): + + .. code-block:: python + + # At the top-level of the module + some_str_key = StashKey[str]() + some_bool_key = StashKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + stash[some_str_key] = "value" + stash[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = stash[some_str_key] + # The static type of some_bool is bool. 
+ some_bool = stash[some_bool_key] + """ + + __slots__ = ("_storage",) + + def __init__(self) -> None: + self._storage: Dict[StashKey[Any], object] = {} + + def __setitem__(self, key: StashKey[T], value: T) -> None: + """Set a value for key.""" + self._storage[key] = value + + def __getitem__(self, key: StashKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._storage[key]) + + def get(self, key: StashKey[T], default: D) -> Union[T, D]: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StashKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StashKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/contrib/python/pytest/py3/_pytest/stepwise.py b/contrib/python/pytest/py3/_pytest/stepwise.py index 197577c790..4d95a96b87 100644 --- a/contrib/python/pytest/py3/_pytest/stepwise.py +++ b/contrib/python/pytest/py3/_pytest/stepwise.py @@ -31,13 +31,16 @@ def pytest_addoption(parser: Parser) -> None: action="store_true", default=False, dest="stepwise_skip", - help="ignore the first failing test but stop on the next failing test", + help="ignore the first failing test but stop on the next failing test.\n" + "implicitly enables --stepwise.", ) @pytest.hookimpl def pytest_configure(config: Config) -> None: - # We should always have a cache as cache provider plugin uses tryfirst=True + if config.option.stepwise_skip: + # allow --stepwise-skip to work on it's own merits. + config.option.stepwise = True if config.getoption("stepwise"): config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") diff --git a/contrib/python/pytest/py3/_pytest/store.py b/contrib/python/pytest/py3/_pytest/store.py deleted file mode 100644 index e5008cfc5a..0000000000 --- a/contrib/python/pytest/py3/_pytest/store.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Any -from typing import cast -from typing import Dict -from typing import Generic -from typing import TypeVar -from typing import Union - - -__all__ = ["Store", "StoreKey"] - - -T = TypeVar("T") -D = TypeVar("D") - - -class StoreKey(Generic[T]): - """StoreKey is an object used as a key to a Store. - - A StoreKey is associated with the type T of the value of the key. - - A StoreKey is unique and cannot conflict with another key. - """ - - __slots__ = () - - -class Store: - """Store is a type-safe heterogenous mutable mapping that - allows keys and value types to be defined separately from - where it (the Store) is created. - - Usually you will be given an object which has a ``Store``: - - .. code-block:: python - - store: Store = some_object.store - - If a module wants to store data in this Store, it creates StoreKeys - for its keys (at the module level): - - .. code-block:: python - - some_str_key = StoreKey[str]() - some_bool_key = StoreKey[bool]() - - To store information: - - .. code-block:: python - - # Value type must match the key. 
- store[some_str_key] = "value" - store[some_bool_key] = True - - To retrieve the information: - - .. code-block:: python - - # The static type of some_str is str. - some_str = store[some_str_key] - # The static type of some_bool is bool. - some_bool = store[some_bool_key] - - Why use this? - ------------- - - Problem: module Internal defines an object. Module External, which - module Internal doesn't know about, receives the object and wants to - attach information to it, to be retrieved later given the object. - - Bad solution 1: Module External assigns private attributes directly on - the object. This doesn't work well because the type checker doesn't - know about these attributes and it complains about undefined attributes. - - Bad solution 2: module Internal adds a ``Dict[str, Any]`` attribute to - the object. Module External stores its data in private keys of this dict. - This doesn't work well because retrieved values are untyped. - - Good solution: module Internal adds a ``Store`` to the object. Module - External mints StoreKeys for its own keys. Module External stores and - retrieves its data using these keys. - """ - - __slots__ = ("_store",) - - def __init__(self) -> None: - self._store: Dict[StoreKey[Any], object] = {} - - def __setitem__(self, key: StoreKey[T], value: T) -> None: - """Set a value for key.""" - self._store[key] = value - - def __getitem__(self, key: StoreKey[T]) -> T: - """Get the value for key. - - Raises ``KeyError`` if the key wasn't set before. - """ - return cast(T, self._store[key]) - - def get(self, key: StoreKey[T], default: D) -> Union[T, D]: - """Get the value for key, or return default if the key wasn't set - before.""" - try: - return self[key] - except KeyError: - return default - - def setdefault(self, key: StoreKey[T], default: T) -> T: - """Return the value of key if already set, otherwise set the value - of key to default and return default.""" - try: - return self[key] - except KeyError: - self[key] = default - return default - - def __delitem__(self, key: StoreKey[T]) -> None: - """Delete the value for key. - - Raises ``KeyError`` if the key wasn't set before. - """ - del self._store[key] - - def __contains__(self, key: StoreKey[T]) -> bool: - """Return whether key was set.""" - return key in self._store diff --git a/contrib/python/pytest/py3/_pytest/terminal.py b/contrib/python/pytest/py3/_pytest/terminal.py index fbfb09aecf..ccbd84d7d7 100644 --- a/contrib/python/pytest/py3/_pytest/terminal.py +++ b/contrib/python/pytest/py3/_pytest/terminal.py @@ -14,6 +14,7 @@ from pathlib import Path from typing import Any from typing import Callable from typing import cast +from typing import ClassVar from typing import Dict from typing import Generator from typing import List @@ -28,7 +29,6 @@ from typing import Union import attr import pluggy -import py import _pytest._version from _pytest import nodes @@ -277,7 +277,7 @@ def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]: return outcome, letter, outcome.upper() -@attr.s +@attr.s(auto_attribs=True) class WarningReport: """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. @@ -285,30 +285,24 @@ class WarningReport: User friendly message about the warning. :ivar str|None nodeid: nodeid that generated the warning (see ``get_location``). - :ivar tuple|py.path.local fslocation: + :ivar tuple fslocation: File system location of the source of the warning (see ``get_location``). 
""" - message = attr.ib(type=str) - nodeid = attr.ib(type=Optional[str], default=None) - fslocation = attr.ib( - type=Optional[Union[Tuple[str, int], py.path.local]], default=None - ) - count_towards_summary = True + message: str + nodeid: Optional[str] = None + fslocation: Optional[Tuple[str, int]] = None + + count_towards_summary: ClassVar = True def get_location(self, config: Config) -> Optional[str]: """Return the more user-friendly information about the location of a warning, or None.""" if self.nodeid: return self.nodeid if self.fslocation: - if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: - filename, linenum = self.fslocation[:2] - relpath = bestrelpath( - config.invocation_params.dir, absolutepath(filename) - ) - return f"{relpath}:{linenum}" - else: - return str(self.fslocation) + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" return None @@ -325,7 +319,6 @@ class TerminalReporter: self.stats: Dict[str, List[Any]] = {} self._main_color: Optional[str] = None self._known_types: Optional[List[str]] = None - self.startdir = config.invocation_dir self.startpath = config.invocation_params.dir if file is None: file = sys.stdout @@ -475,7 +468,9 @@ class TerminalReporter: return True def pytest_warning_recorded( - self, warning_message: warnings.WarningMessage, nodeid: str, + self, + warning_message: warnings.WarningMessage, + nodeid: str, ) -> None: from _pytest.warnings import warning_record_to_str @@ -582,7 +577,7 @@ class TerminalReporter: if self.verbosity <= 0 and self._show_progress_info: if self._show_progress_info == "count": num_tests = self._session.testscollected - progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) + progress_length = len(f" [{num_tests}/{num_tests}]") else: progress_length = len(" [100%]") @@ -604,7 +599,7 @@ class TerminalReporter: if self._show_progress_info == "count": if collected: progress = self._progress_nodeids_reported - counter_format = "{{:{}d}}".format(len(str(collected))) + counter_format = f"{{:{len(str(collected))}d}}" format_string = f" [{counter_format}/{{}}]" return format_string.format(len(progress), collected) return f" [ {collected} / {collected} ]" @@ -663,10 +658,7 @@ class TerminalReporter: skipped = len(self.stats.get("skipped", [])) deselected = len(self.stats.get("deselected", [])) selected = self._numcollected - errors - skipped - deselected - if final: - line = "collected " - else: - line = "collecting " + line = "collected " if final else "collecting " line += ( str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") ) @@ -698,9 +690,9 @@ class TerminalReporter: pypy_version_info = getattr(sys, "pypy_version_info", None) if pypy_version_info: verinfo = ".".join(map(str, pypy_version_info[:3])) - msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3]) - msg += ", pytest-{}, py-{}, pluggy-{}".format( - _pytest._version.version, py.__version__, pluggy.__version__ + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += ", pytest-{}, pluggy-{}".format( + _pytest._version.version, pluggy.__version__ ) if ( self.verbosity > 0 @@ -710,7 +702,7 @@ class TerminalReporter: msg += " -- " + str(sys.executable) self.write_line(msg) lines = self.config.hook.pytest_report_header( - config=self.config, startdir=self.startdir + config=self.config, start_path=self.startpath ) self._write_report_lines_from_hooks(lines) @@ -745,7 +737,9 @@ class TerminalReporter: 
self.report_collect(True) lines = self.config.hook.pytest_report_collectionfinish( - config=self.config, startdir=self.startdir, items=session.items + config=self.config, + start_path=self.startpath, + items=session.items, ) self._write_report_lines_from_hooks(lines) @@ -762,9 +756,6 @@ class TerminalReporter: rep.toterminal(self._tw) def _printcollecteditems(self, items: Sequence[Item]) -> None: - # To print out items and their parent collectors - # we take care to leave out Instances aka () - # because later versions are going to get rid of them anyway. if self.config.option.verbose < 0: if self.config.option.verbose < -1: counts = Counter(item.nodeid.split("::", 1)[0] for item in items) @@ -784,8 +775,6 @@ class TerminalReporter: stack.pop() for col in needed_collectors[len(stack) :]: stack.append(col) - if col.name == "()": # Skip Instances. - continue indent = (len(stack) - 1) * " " self._tw.line(f"{indent}{col}") if self.config.option.verbose >= 1: @@ -856,8 +845,10 @@ class TerminalReporter: yellow=True, ) - def _locationline(self, nodeid, fspath, lineno, domain): - def mkrel(nodeid): + def _locationline( + self, nodeid: str, fspath: str, lineno: Optional[int], domain: str + ) -> str: + def mkrel(nodeid: str) -> str: line = self.config.cwd_relative_nodeid(nodeid) if domain and line.endswith(domain): line = line[: -len(domain)] @@ -867,13 +858,12 @@ class TerminalReporter: return line # collect_fspath comes from testid which has a "/"-normalized path. - if fspath: res = mkrel(nodeid) if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( "\\", nodes.SEP ): - res += " <- " + bestrelpath(self.startpath, fspath) + res += " <- " + bestrelpath(self.startpath, Path(fspath)) else: res = "[location]" return res + " " @@ -897,11 +887,7 @@ class TerminalReporter: # Summaries for sessionfinish. 
# def getreports(self, name: str): - values = [] - for x in self.stats.get(name, []): - if not hasattr(x, "_pdbshown"): - values.append(x) - return values + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] def summary_warnings(self) -> None: if self.hasopt("w"): @@ -953,7 +939,9 @@ class TerminalReporter: message = message.rstrip() self._tw.line(message) self._tw.line() - self._tw.line("-- Docs: https://docs.pytest.org/en/stable/warnings.html") + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) def summary_passes(self) -> None: if self.config.option.tbstyle != "no": @@ -1058,7 +1046,7 @@ class TerminalReporter: msg = ", ".join(line_parts) main_markup = {main_color: True} - duration = " in {}".format(format_session_duration(session_duration)) + duration = f" in {format_session_duration(session_duration)}" duration_with_markup = self._tw.markup(duration, **main_markup) if display_sep: fullwidth += len(duration_with_markup) - len(duration) @@ -1310,7 +1298,8 @@ def _get_line_with_reprcrash_message( def _folded_skips( - startpath: Path, skipped: Sequence[CollectReport], + startpath: Path, + skipped: Sequence[CollectReport], ) -> List[Tuple[int, str, Optional[int], str]]: d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {} for event in skipped: diff --git a/contrib/python/pytest/py3/_pytest/threadexception.py b/contrib/python/pytest/py3/_pytest/threadexception.py index 1c1f62fdb7..43341e739a 100644 --- a/contrib/python/pytest/py3/_pytest/threadexception.py +++ b/contrib/python/pytest/py3/_pytest/threadexception.py @@ -34,11 +34,10 @@ class catch_threading_exception: """ def __init__(self) -> None: - # See https://github.com/python/typeshed/issues/4767 regarding the underscore. - self.args: Optional["threading._ExceptHookArgs"] = None - self._old_hook: Optional[Callable[["threading._ExceptHookArgs"], Any]] = None + self.args: Optional["threading.ExceptHookArgs"] = None + self._old_hook: Optional[Callable[["threading.ExceptHookArgs"], Any]] = None - def _hook(self, args: "threading._ExceptHookArgs") -> None: + def _hook(self, args: "threading.ExceptHookArgs") -> None: self.args = args def __enter__(self) -> "catch_threading_exception": @@ -62,14 +61,13 @@ def thread_exception_runtest_hook() -> Generator[None, None, None]: with catch_threading_exception() as cm: yield if cm.args: - if cm.args.thread is not None: - thread_name = cm.args.thread.name - else: - thread_name = "<unknown>" + thread_name = "<unknown>" if cm.args.thread is None else cm.args.thread.name msg = f"Exception in thread {thread_name}\n\n" msg += "".join( traceback.format_exception( - cm.args.exc_type, cm.args.exc_value, cm.args.exc_traceback, + cm.args.exc_type, + cm.args.exc_value, + cm.args.exc_traceback, ) ) warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) diff --git a/contrib/python/pytest/py3/_pytest/tmpdir.py b/contrib/python/pytest/py3/_pytest/tmpdir.py index a6bd383a9c..f901fd5727 100644 --- a/contrib/python/pytest/py3/_pytest/tmpdir.py +++ b/contrib/python/pytest/py3/_pytest/tmpdir.py @@ -7,7 +7,6 @@ from pathlib import Path from typing import Optional import attr -import py from .pathlib import LOCK_TIMEOUT from .pathlib import make_numbered_dir @@ -54,7 +53,10 @@ class TempPathFactory: @classmethod def from_config( - cls, config: Config, *, _ispytest: bool = False, + cls, + config: Config, + *, + _ispytest: bool = False, ) -> "TempPathFactory": """Create a factory according to pytest configuration. 
@@ -115,7 +117,12 @@ class TempPathFactory: # use a sub-directory in the temproot to speed-up # make_numbered_dir() call rootdir = temproot.joinpath(f"pytest-of-{user}") - rootdir.mkdir(mode=0o700, exist_ok=True) + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned illegal characters for the platform, use unknown back off mechanism + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) # Because we use exist_ok=True with a predictable name, make sure # we are the owners, to prevent any funny business (on unix, where # temproot is usually shared). @@ -148,29 +155,6 @@ class TempPathFactory: return basetemp -@final -@attr.s(init=False) -class TempdirFactory: - """Backward comptibility wrapper that implements :class:``py.path.local`` - for :class:``TempPathFactory``.""" - - _tmppath_factory = attr.ib(type=TempPathFactory) - - def __init__( - self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False - ) -> None: - check_ispytest(_ispytest) - self._tmppath_factory = tmppath_factory - - def mktemp(self, basename: str, numbered: bool = True) -> py.path.local: - """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" - return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) - - def getbasetemp(self) -> py.path.local: - """Backward compat wrapper for ``_tmppath_factory.getbasetemp``.""" - return py.path.local(self._tmppath_factory.getbasetemp().resolve()) - - def get_user() -> Optional[str]: """Return the current user name, or None if getuser() does not work in the current environment (see #1010).""" @@ -183,30 +167,21 @@ def get_user() -> Optional[str]: def pytest_configure(config: Config) -> None: - """Create a TempdirFactory and attach it to the config object. + """Create a TempPathFactory and attach it to the config object. This is to comply with existing plugins which expect the handler to be available at pytest_configure time, but ideally should be moved entirely - to the tmpdir_factory session fixture. + to the tmp_path_factory session fixture. """ mp = MonkeyPatch() - tmppath_handler = TempPathFactory.from_config(config, _ispytest=True) - t = TempdirFactory(tmppath_handler, _ispytest=True) - config._cleanup.append(mp.undo) - mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) - mp.setattr(config, "_tmpdirhandler", t, raising=False) - - -@fixture(scope="session") -def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: - """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.""" - # Set dynamically by pytest_configure() above. - return request.config._tmpdirhandler # type: ignore + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) @fixture(scope="session") def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: - """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.""" + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" # Set dynamically by pytest_configure() above. 
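With the ``py.path``-based ``tmpdir`` helpers dropped from this module, the ``pathlib``-based fixtures are the ones wired up here; a minimal usage sketch (illustrative only, not part of the diff):

def test_writes_file(tmp_path):
    out = tmp_path / "report.txt"                 # tmp_path is a pathlib.Path
    out.write_text("hello")
    assert out.read_text() == "hello"

def test_shared_directory(tmp_path_factory):
    shared = tmp_path_factory.mktemp("shared")    # unique numbered directory
    assert shared.is_dir()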
return request.config._tmp_path_factory # type: ignore @@ -220,24 +195,6 @@ def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: @fixture -def tmpdir(tmp_path: Path) -> py.path.local: - """Return a temporary directory path object which is unique to each test - function invocation, created as a sub directory of the base temporary - directory. - - By default, a new base temporary directory is created each test session, - and old bases are removed after 3 sessions, to aid in debugging. If - ``--basetemp`` is used then it is cleared each session. See :ref:`base - temporary directory`. - - The returned object is a `py.path.local`_ path object. - - .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - """ - return py.path.local(tmp_path) - - -@fixture def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path: """Return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary diff --git a/contrib/python/pytest/py3/_pytest/unittest.py b/contrib/python/pytest/py3/_pytest/unittest.py index 55f15efe4b..0315168b04 100644 --- a/contrib/python/pytest/py3/_pytest/unittest.py +++ b/contrib/python/pytest/py3/_pytest/unittest.py @@ -29,13 +29,11 @@ from _pytest.python import Class from _pytest.python import Function from _pytest.python import PyCollector from _pytest.runner import CallInfo -from _pytest.skipping import skipped_by_mark_key -from _pytest.skipping import unexpectedsuccess_key +from _pytest.scope import Scope if TYPE_CHECKING: import unittest - - from _pytest.fixtures import _Scope + import twisted.trial.unittest _SysExcInfoType = Union[ Tuple[Type[BaseException], BaseException, types.TracebackType], @@ -103,7 +101,7 @@ class UnitTestCase(Class): "setUpClass", "tearDownClass", "doClassCleanups", - scope="class", + scope=Scope.Class, pass_self=False, ) if class_fixture: @@ -114,7 +112,7 @@ class UnitTestCase(Class): "setup_method", "teardown_method", None, - scope="function", + scope=Scope.Function, pass_self=True, ) if method_fixture: @@ -126,7 +124,7 @@ def _make_xunit_fixture( setup_name: str, teardown_name: str, cleanup_name: Optional[str], - scope: "_Scope", + scope: Scope, pass_self: bool, ): setup = getattr(obj, setup_name, None) @@ -142,15 +140,15 @@ def _make_xunit_fixture( pass @pytest.fixture( - scope=scope, + scope=scope.value, autouse=True, # Use a unique name to speed up lookup. - name=f"unittest_{setup_name}_fixture_{obj.__qualname__}", + name=f"_unittest_{setup_name}_fixture_{obj.__qualname__}", ) def fixture(self, request: FixtureRequest) -> Generator[None, None, None]: if _is_skipped(self): reason = self.__unittest_skip_why__ - pytest.skip(reason) + raise pytest.skip.Exception(reason, _use_item_location=True) if setup is not None: try: if pass_self: @@ -187,6 +185,15 @@ class TestCaseFunction(Function): _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None _testcase: Optional["unittest.TestCase"] = None + def _getobj(self): + assert self.parent is not None + # Unlike a regular Function in a Class, where `item.obj` returns + # a *bound* method (attached to an instance), TestCaseFunction's + # `obj` returns an *unbound* method (not attached to an instance). + # This inconsistency is probably not desirable, but needs some + # consideration before changing. 
+ return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined] + def setup(self) -> None: # A bound method to be called during teardown() if set (see 'runtest()'). self._explicit_tearDown: Optional[Callable[[], None]] = None @@ -210,7 +217,7 @@ class TestCaseFunction(Function): # Unwrap potential exception info (see twisted trial support below). rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) try: - excinfo = _pytest._code.ExceptionInfo(rawexcinfo) # type: ignore[arg-type] + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo) # type: ignore[arg-type] # Invoke the attributes to trigger storing the traceback # trial causes some issue there. excinfo.value @@ -256,9 +263,8 @@ class TestCaseFunction(Function): def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None: try: - skip(reason) + raise pytest.skip.Exception(reason, _use_item_location=True) except skip.Exception: - self._store[skipped_by_mark_key] = True self._addexcinfo(sys.exc_info()) def addExpectedFailure( @@ -273,9 +279,18 @@ class TestCaseFunction(Function): self._addexcinfo(sys.exc_info()) def addUnexpectedSuccess( - self, testcase: "unittest.TestCase", reason: str = "" + self, + testcase: "unittest.TestCase", + reason: Optional["twisted.trial.unittest.Todo"] = None, ) -> None: - self._store[unexpectedsuccess_key] = reason + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. + try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) def addSuccess(self, testcase: "unittest.TestCase") -> None: pass @@ -283,15 +298,6 @@ class TestCaseFunction(Function): def stopTest(self, testcase: "unittest.TestCase") -> None: pass - def _expecting_failure(self, test_method) -> bool: - """Return True if the given unittest method (or the entire class) is marked - with @expectedFailure.""" - expecting_failure_method = getattr( - test_method, "__unittest_expecting_failure__", False - ) - expecting_failure_class = getattr(self, "__unittest_expecting_failure__", False) - return bool(expecting_failure_class or expecting_failure_method) - def runtest(self) -> None: from _pytest.debugging import maybe_wrap_pytest_function_for_tracing @@ -325,7 +331,7 @@ class TestCaseFunction(Function): def _prunetraceback( self, excinfo: _pytest._code.ExceptionInfo[BaseException] ) -> None: - Function._prunetraceback(self, excinfo) + super()._prunetraceback(excinfo) traceback = excinfo.traceback.filter( lambda x: not x.frame.f_globals.get("__unittest") ) @@ -343,6 +349,10 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: except AttributeError: pass + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. unittest = sys.modules.get("unittest") if ( unittest @@ -350,7 +360,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] ): excinfo = call.excinfo - # Let's substitute the excinfo with a pytest.skip one. 
call2 = CallInfo[None].from_call( lambda: pytest.skip(str(excinfo.value)), call.when ) diff --git a/contrib/python/pytest/py3/_pytest/warning_types.py b/contrib/python/pytest/py3/_pytest/warning_types.py index 2eadd9fe4d..2a97a31978 100644 --- a/contrib/python/pytest/py3/_pytest/warning_types.py +++ b/contrib/python/pytest/py3/_pytest/warning_types.py @@ -42,7 +42,6 @@ class PytestCollectionWarning(PytestWarning): __module__ = "pytest" -@final class PytestDeprecationWarning(PytestWarning, DeprecationWarning): """Warning class for features that will be removed in a future version.""" @@ -50,6 +49,20 @@ class PytestDeprecationWarning(PytestWarning, DeprecationWarning): @final +class PytestRemovedIn7Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 7.""" + + __module__ = "pytest" + + +@final +class PytestRemovedIn8Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 8.""" + + __module__ = "pytest" + + +@final class PytestExperimentalApiWarning(PytestWarning, FutureWarning): """Warning category used to denote experiments in pytest. @@ -116,7 +129,7 @@ _W = TypeVar("_W", bound=PytestWarning) @final -@attr.s +@attr.s(auto_attribs=True) class UnformattedWarning(Generic[_W]): """A warning meant to be formatted during runtime. @@ -124,8 +137,8 @@ class UnformattedWarning(Generic[_W]): as opposed to a direct message. """ - category = attr.ib(type=Type["_W"]) - template = attr.ib(type=str) + category: Type["_W"] + template: str def format(self, **kwargs: Any) -> _W: """Return an instance of the warning category, formatted with given kwargs.""" diff --git a/contrib/python/pytest/py3/_pytest/warnings.py b/contrib/python/pytest/py3/_pytest/warnings.py index 35eed96df5..c0c946cbde 100644 --- a/contrib/python/pytest/py3/_pytest/warnings.py +++ b/contrib/python/pytest/py3/_pytest/warnings.py @@ -21,7 +21,7 @@ def pytest_configure(config: Config) -> None: config.addinivalue_line( "markers", "filterwarnings(warning): add a warning filter to the given test. " - "see https://docs.pytest.org/en/stable/warnings.html#pytest-mark-filterwarnings ", + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", ) @@ -49,6 +49,8 @@ def catch_warnings_for_item( warnings.filterwarnings("always", category=DeprecationWarning) warnings.filterwarnings("always", category=PendingDeprecationWarning) + warnings.filterwarnings("error", category=pytest.PytestRemovedIn7Warning) + apply_warning_filters(config_filters, cmdline_filters) # apply filters from "filterwarnings" marks |
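For the threadexception.py hunk, catch_threading_exception now uses the public threading.ExceptHookArgs name, but the behaviour is unchanged: an exception escaping a worker thread during a test is collected and surfaced as a PytestUnhandledThreadExceptionWarning after the test phase. A small sketch under that assumption; the thread name and failing callable are arbitrary, not taken from the diff:

    import threading

    def test_worker_exception_becomes_warning() -> None:
        # The ZeroDivisionError escapes the worker thread; threading.excepthook
        # (temporarily replaced by catch_threading_exception) records it, and
        # pytest then emits PytestUnhandledThreadExceptionWarning naming the
        # "worker" thread instead of failing the test body itself.
        worker = threading.Thread(target=lambda: 1 / 0, name="worker")
        worker.start()
        worker.join()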
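The tmpdir.py hunks drop the py.path.local-based TempdirFactory wrapper and the tmpdir/tmpdir_factory plumbing from this vendored copy, leaving the pathlib-based tmp_path and tmp_path_factory fixtures. A minimal sketch of test code written against the surviving API; the test and fixture names are illustrative, and the pytest.TempPathFactory annotation assumes the public export that the new docstring refers to:

    from pathlib import Path

    import pytest

    def test_writes_report(tmp_path: Path) -> None:
        # tmp_path is a fresh pathlib.Path unique to this test invocation.
        report = tmp_path / "report.txt"
        report.write_text("ok", encoding="utf-8")
        assert report.read_text(encoding="utf-8") == "ok"

    @pytest.fixture(scope="session")
    def shared_dir(tmp_path_factory: pytest.TempPathFactory) -> Path:
        # mktemp() returns a numbered sub-directory of the session base temp dir.
        return tmp_path_factory.mktemp("shared")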
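In the unittest.py changes, addUnexpectedSuccess no longer stashes a reason under unexpectedsuccess_key; it calls fail() directly, so a passing test marked @unittest.expectedFailure is reported as a failure ("Unexpected success") rather than an xpass, matching unittest's own behaviour. A hedged illustration; the class and test names are made up:

    import unittest

    class TestTodo(unittest.TestCase):
        @unittest.expectedFailure
        def test_already_fixed(self) -> None:
            # The assertion passes, so unittest reports an "unexpected success";
            # with the hunk above pytest fails the test with an "Unexpected
            # success" message rather than recording an xpass.
            self.assertEqual(1 + 1, 2)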
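warning_types.py adds PytestRemovedIn7Warning and PytestRemovedIn8Warning as PytestDeprecationWarning subclasses, and warnings.py registers an "error" filter for PytestRemovedIn7Warning inside the per-test warning context. A rough sketch of the visible effect, assuming no ini or command-line filter overrides that default; the test name and warning message are invented:

    import warnings

    import pytest

    def test_removed_in_7_warning_is_an_error() -> None:
        # Inside the per-test warning context the "error" filter added above
        # makes this warning raise, so pytest.raises can catch it.
        with pytest.raises(pytest.PytestRemovedIn7Warning):
            warnings.warn(pytest.PytestRemovedIn7Warning("goes away in pytest 7"))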