author | robot-piglet <robot-piglet@yandex-team.com> | 2025-05-05 12:31:52 +0300 |
---|---|---|
committer | robot-piglet <robot-piglet@yandex-team.com> | 2025-05-05 12:41:33 +0300 |
commit | 6ff49ec58061f642c3a2f83c61eba12820787dfc (patch) | |
tree | c733ec9bdb15ed280080d31dea8725bfec717acd /contrib/python/pytest/py3/_pytest | |
parent | eefca8305c6a545cc6b16dca3eb0d91dcef2adcd (diff) | |
download | ydb-6ff49ec58061f642c3a2f83c61eba12820787dfc.tar.gz | |
Intermediate changes
commit_hash:8b3bb826b17db8329ed1221f545c0645f12c552d
Diffstat (limited to 'contrib/python/pytest/py3/_pytest')
65 files changed, 3155 insertions, 1914 deletions
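The diff below is the vendored pytest 7.4.4 → 8.0.2 sync. Among other things it brings in the new `ExceptionInfo.group_contains()` helper and a `match()` that also searches PEP 678 `__notes__` (see the hunks for `_pytest/_code/code.py`). The following is a minimal usage sketch, not part of the commit, showing how those two additions behave; it assumes Python 3.11+ where `ExceptionGroup` and `BaseException.add_note()` are built in (earlier interpreters go through the `exceptiongroup` backport that `_pytest._code.code` imports), and the test names and messages are illustrative only.

```python
# Illustrative only -- not part of this commit. Exercises the new
# ExceptionInfo.group_contains() helper and the __notes__-aware match()
# added in the pytest 8.0.2 code vendored below. Assumes Python 3.11+.
import pytest


def test_group_contains_nested_value_error():
    with pytest.raises(ExceptionGroup) as excinfo:
        raise ExceptionGroup(
            "top",
            [
                ValueError("bad value"),
                ExceptionGroup("inner", [TypeError("bad type")]),
            ],
        )
    # depth=None (the default) searches every nesting level; depth=1 restricts
    # the search to exceptions directly inside the topmost group.
    assert excinfo.group_contains(ValueError, match="bad value")
    assert excinfo.group_contains(TypeError, depth=2)
    assert not excinfo.group_contains(TypeError, depth=1)


def test_match_searches_pep678_notes():
    with pytest.raises(RuntimeError) as excinfo:
        err = RuntimeError("boom")
        err.add_note("extra context")
        raise err
    # match() now runs re.search over str(exc) joined with its __notes__,
    # so a pattern that only appears in a note still matches.
    assert excinfo.match("extra context")
```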
diff --git a/contrib/python/pytest/py3/_pytest/__init__.py b/contrib/python/pytest/py3/_pytest/__init__.py index 8a406c5c751..9062768eae3 100644 --- a/contrib/python/pytest/py3/_pytest/__init__.py +++ b/contrib/python/pytest/py3/_pytest/__init__.py @@ -1,7 +1,8 @@ __all__ = ["__version__", "version_tuple"] try: - from ._version import version as __version__, version_tuple + from ._version import version as __version__ + from ._version import version_tuple except ImportError: # pragma: no cover # broken installation, we don't even try # unknown only works because we do poor mans version compare diff --git a/contrib/python/pytest/py3/_pytest/_argcomplete.py b/contrib/python/pytest/py3/_pytest/_argcomplete.py index 6a8083770ae..c24f925202a 100644 --- a/contrib/python/pytest/py3/_pytest/_argcomplete.py +++ b/contrib/python/pytest/py3/_pytest/_argcomplete.py @@ -61,10 +61,11 @@ If things do not work right away: which should throw a KeyError: 'COMPLINE' (which is properly set by the global argcomplete script). """ + import argparse +from glob import glob import os import sys -from glob import glob from typing import Any from typing import List from typing import Optional diff --git a/contrib/python/pytest/py3/_pytest/_code/__init__.py b/contrib/python/pytest/py3/_pytest/_code/__init__.py index 511d0dde661..b0a418e9555 100644 --- a/contrib/python/pytest/py3/_pytest/_code/__init__.py +++ b/contrib/python/pytest/py3/_pytest/_code/__init__.py @@ -1,4 +1,5 @@ """Python inspection/code generation API.""" + from .code import Code from .code import ExceptionInfo from .code import filter_traceback @@ -9,6 +10,7 @@ from .code import TracebackEntry from .source import getrawcode from .source import Source + __all__ = [ "Code", "ExceptionInfo", diff --git a/contrib/python/pytest/py3/_pytest/_code/code.py b/contrib/python/pytest/py3/_pytest/_code/code.py index 9b051332baf..4b0a2a385e2 100644 --- a/contrib/python/pytest/py3/_pytest/_code/code.py +++ b/contrib/python/pytest/py3/_pytest/_code/code.py @@ -1,14 +1,14 @@ import ast import dataclasses import inspect -import os -import re -import sys -import traceback from inspect import CO_VARARGS from inspect import CO_VARKEYWORDS from io import StringIO +import os from pathlib import Path +import re +import sys +import traceback from traceback import format_exception_only from types import CodeType from types import FrameType @@ -17,18 +17,21 @@ from typing import Any from typing import Callable from typing import ClassVar from typing import Dict +from typing import Final +from typing import final from typing import Generic from typing import Iterable from typing import List +from typing import Literal from typing import Mapping from typing import Optional from typing import overload from typing import Pattern from typing import Sequence from typing import Set +from typing import SupportsIndex from typing import Tuple from typing import Type -from typing import TYPE_CHECKING from typing import TypeVar from typing import Union @@ -42,22 +45,17 @@ from _pytest._code.source import Source from _pytest._io import TerminalWriter from _pytest._io.saferepr import safeformat from _pytest._io.saferepr import saferepr -from _pytest.compat import final from _pytest.compat import get_real_func from _pytest.deprecated import check_ispytest from _pytest.pathlib import absolutepath from _pytest.pathlib import bestrelpath -if TYPE_CHECKING: - from typing_extensions import Final - from typing_extensions import Literal - from typing_extensions import SupportsIndex - - 
_TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] if sys.version_info[:2] < (3, 11): from exceptiongroup import BaseExceptionGroup +_TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + class Code: """Wrapper around Python code objects.""" @@ -396,11 +394,11 @@ class Traceback(List[TracebackEntry]): def filter( self, - # TODO(py38): change to positional only. - _excinfo_or_fn: Union[ + excinfo_or_fn: Union[ "ExceptionInfo[BaseException]", Callable[[TracebackEntry], bool], ], + /, ) -> "Traceback": """Return a Traceback instance with certain items removed. @@ -411,10 +409,10 @@ class Traceback(List[TracebackEntry]): ``TracebackEntry`` instance, and should return True when the item should be added to the ``Traceback``, False when not. """ - if isinstance(_excinfo_or_fn, ExceptionInfo): - fn = lambda x: not x.ishidden(_excinfo_or_fn) # noqa: E731 + if isinstance(excinfo_or_fn, ExceptionInfo): + fn = lambda x: not x.ishidden(excinfo_or_fn) # noqa: E731 else: - fn = _excinfo_or_fn + fn = excinfo_or_fn return Traceback(filter(fn, self)) def recursionindex(self) -> Optional[int]: @@ -489,9 +487,10 @@ class ExceptionInfo(Generic[E]): .. versionadded:: 7.4 """ - assert ( - exception.__traceback__ - ), "Exceptions passed to ExcInfo.from_exception(...) must have a non-None __traceback__." + assert exception.__traceback__, ( + "Exceptions passed to ExcInfo.from_exception(...)" + " must have a non-None __traceback__." + ) exc_info = (type(exception), exception, exception.__traceback__) return cls.from_exc_info(exc_info, exprinfo) @@ -590,9 +589,7 @@ class ExceptionInfo(Generic[E]): def __repr__(self) -> str: if self._excinfo is None: return "<ExceptionInfo for raises contextmanager>" - return "<{} {} tblen={}>".format( - self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback) - ) + return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" def exconly(self, tryshort: bool = False) -> str: """Return the exception as a string. @@ -633,7 +630,7 @@ class ExceptionInfo(Generic[E]): def getrepr( self, showlocals: bool = False, - style: "_TracebackStyle" = "long", + style: _TracebackStyle = "long", abspath: bool = False, tbfilter: Union[ bool, Callable[["ExceptionInfo[BaseException]"], Traceback] @@ -700,6 +697,25 @@ class ExceptionInfo(Generic[E]): ) return fmt.repr_excinfo(self) + def _stringify_exception(self, exc: BaseException) -> str: + try: + notes = getattr(exc, "__notes__", []) + except KeyError: + # Workaround for https://github.com/python/cpython/issues/98778 on + # Python <= 3.9, and some 3.10 and 3.11 patch versions. + HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) + if sys.version_info[:2] <= (3, 11) and isinstance(exc, HTTPError): + notes = [] + else: + raise + + return "\n".join( + [ + str(exc), + *notes, + ] + ) + def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]": """Check whether the regular expression `regexp` matches the string representation of the exception using :func:`python:re.search`. @@ -707,7 +723,7 @@ class ExceptionInfo(Generic[E]): If it matches `True` is returned, otherwise an `AssertionError` is raised. """ __tracebackhide__ = True - value = str(self.value) + value = self._stringify_exception(self.value) msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" if regexp == value: msg += "\n Did you mean to `re.escape()` the regex?" 
@@ -715,6 +731,69 @@ class ExceptionInfo(Generic[E]): # Return True to allow for "assert excinfo.match()". return True + def _group_contains( + self, + exc_group: BaseExceptionGroup[BaseException], + expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], + match: Union[str, Pattern[str], None], + target_depth: Optional[int] = None, + current_depth: int = 1, + ) -> bool: + """Return `True` if a `BaseExceptionGroup` contains a matching exception.""" + if (target_depth is not None) and (current_depth > target_depth): + # already descended past the target depth + return False + for exc in exc_group.exceptions: + if isinstance(exc, BaseExceptionGroup): + if self._group_contains( + exc, expected_exception, match, target_depth, current_depth + 1 + ): + return True + if (target_depth is not None) and (current_depth != target_depth): + # not at the target depth, no match + continue + if not isinstance(exc, expected_exception): + continue + if match is not None: + value = self._stringify_exception(exc) + if not re.search(match, value): + continue + return True + return False + + def group_contains( + self, + expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], + *, + match: Union[str, Pattern[str], None] = None, + depth: Optional[int] = None, + ) -> bool: + """Check whether a captured exception group contains a matching exception. + + :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. + + :param str | Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + <re-syntax>`, the pattern can first be escaped with :func:`re.escape`. + + :param Optional[int] depth: + If `None`, will search for a matching exception at any nesting depth. + If >= 1, will only match an exception if it's at the specified depth (depth = 1 being + the exceptions contained within the topmost exception group). + """ + msg = "Captured exception is not an instance of `BaseExceptionGroup`" + assert isinstance(self.value, BaseExceptionGroup), msg + msg = "`depth` must be >= 1 if specified" + assert (depth is None) or (depth >= 1), msg + return self._group_contains(self.value, expected_exception, match, depth) + @dataclasses.dataclass class FormattedExcinfo: @@ -725,7 +804,7 @@ class FormattedExcinfo: fail_marker: ClassVar = "E" showlocals: bool = False - style: "_TracebackStyle" = "long" + style: _TracebackStyle = "long" abspath: bool = True tbfilter: Union[bool, Callable[[ExceptionInfo[BaseException]], Traceback]] = True funcargs: bool = False @@ -938,13 +1017,8 @@ class FormattedExcinfo: extraline: Optional[str] = ( "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" " The following exception happened when comparing locals in the stack frame:\n" - " {exc_type}: {exc_msg}\n" - " Displaying first and last {max_frames} stack frames out of {total}." - ).format( - exc_type=type(e).__name__, - exc_msg=str(e), - max_frames=max_frames, - total=len(traceback), + f" {type(e).__name__}: {str(e)}\n" + f" Displaying first and last {max_frames} stack frames out of {len(traceback)}." 
) # Type ignored because adding two instances of a List subtype # currently incorrectly has type List instead of the subtype. @@ -1090,7 +1164,7 @@ class ReprExceptionInfo(ExceptionRepr): class ReprTraceback(TerminalRepr): reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]] extraline: Optional[str] - style: "_TracebackStyle" + style: _TracebackStyle entrysep: ClassVar = "_ " @@ -1124,7 +1198,7 @@ class ReprTracebackNative(ReprTraceback): class ReprEntryNative(TerminalRepr): lines: Sequence[str] - style: ClassVar["_TracebackStyle"] = "native" + style: ClassVar[_TracebackStyle] = "native" def toterminal(self, tw: TerminalWriter) -> None: tw.write("".join(self.lines)) @@ -1136,7 +1210,7 @@ class ReprEntry(TerminalRepr): reprfuncargs: Optional["ReprFuncArgs"] reprlocals: Optional["ReprLocals"] reprfileloc: Optional["ReprFileLocation"] - style: "_TracebackStyle" + style: _TracebackStyle def _write_entry_lines(self, tw: TerminalWriter) -> None: """Write the source code portions of a list of traceback entries with syntax highlighting. @@ -1151,7 +1225,6 @@ class ReprEntry(TerminalRepr): the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" character, as doing so might break line continuations. """ - if not self.lines: return diff --git a/contrib/python/pytest/py3/_pytest/_code/source.py b/contrib/python/pytest/py3/_pytest/_code/source.py index 208cfb80037..835cd1d7b6a 100644 --- a/contrib/python/pytest/py3/_pytest/_code/source.py +++ b/contrib/python/pytest/py3/_pytest/_code/source.py @@ -1,10 +1,9 @@ import ast +from bisect import bisect_right import inspect import textwrap import tokenize import types -import warnings -from bisect import bisect_right from typing import Iterable from typing import Iterator from typing import List @@ -12,6 +11,7 @@ from typing import Optional from typing import overload from typing import Tuple from typing import Union +import warnings class Source: @@ -149,8 +149,7 @@ def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[i values: List[int] = [] for x in ast.walk(node): if isinstance(x, (ast.stmt, ast.ExceptHandler)): - # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator. - # Since Python 3.8, the lineno points to the class/def, so need to include the decorators. + # The lineno points to the class/def, so need to include the decorators. if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): for d in x.decorator_list: values.append(d.lineno - 1) @@ -197,7 +196,9 @@ def getstatementrange_ast( # by using the BlockFinder helper used which inspect.getsource() uses itself. block_finder = inspect.BlockFinder() # If we start with an indented line, put blockfinder to "started" mode. - block_finder.started = source.lines[start][0].isspace() + block_finder.started = ( + bool(source.lines[start]) and source.lines[start][0].isspace() + ) it = ((x + "\n") for x in source.lines[start:end]) try: for tok in tokenize.generate_tokens(lambda: next(it)): diff --git a/contrib/python/pytest/py3/_pytest/_io/pprint.py b/contrib/python/pytest/py3/_pytest/_io/pprint.py new file mode 100644 index 00000000000..61fc8ba9b24 --- /dev/null +++ b/contrib/python/pytest/py3/_pytest/_io/pprint.py @@ -0,0 +1,675 @@ +# This module was imported from the cpython standard library +# (https://github.com/python/cpython/) at commit +# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12). +# +# +# Original Author: Fred L. Drake, Jr. 
+# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. +import collections as _collections +import dataclasses as _dataclasses +from io import StringIO as _StringIO +import re +import types as _types +from typing import Any +from typing import Callable +from typing import Dict +from typing import IO +from typing import Iterator +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return (str(type(self.obj)), id(self.obj)) < ( + str(type(other.obj)), + id(other.obj), + ) + + +def _safe_tuple(t): + """Helper function for comparing 2-tuples""" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__( + self, + indent: int = 4, + width: int = 80, + depth: Optional[int] = None, + ) -> None: + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. + + """ + if indent < 0: + raise ValueError("indent must be >= 0") + if depth is not None and depth <= 0: + raise ValueError("depth must be > 0") + if not width: + raise ValueError("width must be != 0") + self._depth = depth + self._indent_per_level = indent + self._width = width + + def pformat(self, object: Any) -> str: + sio = _StringIO() + self._format(object, sio, 0, 0, set(), 0) + return sio.getvalue() + + def _format( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + return + + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context.add(objid) + p(self, object, stream, indent, allowance, context, level + 1) + context.remove(objid) + elif ( + _dataclasses.is_dataclass(object) + and not isinstance(object, type) + and object.__dataclass_params__.repr + and + # Check dataclass has generated repr method. 
+ hasattr(object.__repr__, "__wrapped__") + and "__create_fn__" in object.__repr__.__wrapped__.__qualname__ + ): + context.add(objid) + self._pprint_dataclass( + object, stream, indent, allowance, context, level + 1 + ) + context.remove(objid) + else: + stream.write(self._repr(object, context, level)) + + def _pprint_dataclass( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + cls_name = object.__class__.__name__ + items = [ + (f.name, getattr(object, f.name)) + for f in _dataclasses.fields(object) + if f.repr + ] + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch: Dict[ + Callable[..., str], + Callable[["PrettyPrinter", Any, IO[str], int, int, Set[int], int], None], + ] = {} + + def _pprint_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + write = stream.write + write("{") + items = sorted(object.items(), key=_safe_tuple) + self._format_dict_items(items, stream, indent, allowance, context, level) + write("}") + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + "(") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + stream.write("[") + self._format_items(object, stream, indent, allowance, context, level) + stream.write("]") + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + stream.write("(") + self._format_items(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write("{") + endchar = "}" + else: + stream.write(typ.__name__ + "({") + endchar = "})" + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance, context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r"\S*\s*", line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + 
current = "" + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write("(") + for i, rep in enumerate(chunks): + if i > 0: + write("\n" + " " * indent) + write(rep) + if level == 1: + write(")") + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write("(") + delim = "" + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = "\n" + " " * indent + if parens: + write(")") + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + write = stream.write + write("bytearray(") + self._pprint_bytes( + bytes(object), stream, indent + 10, allowance + 1, context, level + 1 + ) + write(")") + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + stream.write("mappingproxy(") + self._format(object.copy(), stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = "namespace" + else: + cls_name = object.__class__.__name__ + items = object.__dict__.items() + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items( + self, + items: List[Tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(self._repr(key, context, level)) + write(": ") + self._format(ent, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _format_namespace_items( + self, + items: List[Tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(key) + write("=") + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. 
+ write("...") + else: + self._format( + ent, + stream, + item_indent + len(key) + 1, + 1, + context, + level, + ) + + write(",") + + write("\n" + " " * indent) + + def _format_items( + self, + items: List[Any], + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + + for item in items: + write(delimnl) + self._format(item, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _repr(self, object: Any, context: Set[int], level: int) -> str: + return self._safe_repr(object, context.copy(), self._depth, level) + + def _pprint_default_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + rdf = self._repr(object.default_factory, context, level) + stream.write(f"{object.__class__.__name__}({rdf}, ") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + + if object: + stream.write("{") + items = object.most_common() + self._format_dict_items(items, stream, indent, allowance, context, level) + stream.write("}") + + stream.write(")") + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])): + stream.write(repr(object)) + return + + stream.write(object.__class__.__name__ + "(") + self._format_items(object.maps, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + if object.maxlen is not None: + stream.write("maxlen=%d, " % object.maxlen) + stream.write("[") + + self._format_items(object, stream, indent, allowance + 1, context, level) + stream.write("])") + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: Set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr( + self, object: Any, context: Set[int], maxlevels: Optional[int], level: int + ) -> str: + typ = type(object) + if 
typ in _builtin_scalars: + return repr(object) + + r = getattr(typ, "__repr__", None) + + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}" + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}" + if objid in context: + return _recursion(object) + context.add(objid) + components: List[str] = [] + append = components.append + level += 1 + for k, v in sorted(object.items(), key=_safe_tuple): + krepr = self._safe_repr(k, context, maxlevels, level) + vrepr = self._safe_repr(v, context, maxlevels, level) + append(f"{krepr}: {vrepr}") + context.remove(objid) + return "{%s}" % ", ".join(components) + + if (issubclass(typ, list) and r is list.__repr__) or ( + issubclass(typ, tuple) and r is tuple.__repr__ + ): + if issubclass(typ, list): + if not object: + return "[]" + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()" + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "..." + if objid in context: + return _recursion(object) + context.add(objid) + components = [] + append = components.append + level += 1 + for o in object: + orepr = self._safe_repr(o, context, maxlevels, level) + append(orepr) + context.remove(objid) + return format % ", ".join(components) + + return repr(object) + + +_builtin_scalars = frozenset( + {str, bytes, bytearray, float, complex, bool, type(None), int} +) + + +def _recursion(object: Any) -> str: + return f"<Recursion on {type(object).__name__} with id={id(object)}>" + + +def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]: + current = b"" + last = len(object) // 4 * 4 + for i in range(0, len(object), 4): + part = object[i : i + 4] + candidate = current + part + if i == last: + width -= allowance + if len(repr(candidate)) > width: + if current: + yield repr(current) + current = part + else: + current = candidate + if current: + yield repr(current) diff --git a/contrib/python/pytest/py3/_pytest/_io/saferepr.py b/contrib/python/pytest/py3/_pytest/_io/saferepr.py index c701872238c..9f33fced676 100644 --- a/contrib/python/pytest/py3/_pytest/_io/saferepr.py +++ b/contrib/python/pytest/py3/_pytest/_io/saferepr.py @@ -1,8 +1,5 @@ import pprint import reprlib -from typing import Any -from typing import Dict -from typing import IO from typing import Optional @@ -22,8 +19,8 @@ def _format_repr_exception(exc: BaseException, obj: object) -> str: raise except BaseException as exc: exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})" - return "<[{} raised in repr()] {} object at 0x{:x}>".format( - exc_info, type(obj).__name__, id(obj) + return ( + f"<[{exc_info} raised in repr()] {type(obj).__name__} object at 0x{id(obj):x}>" ) @@ -111,7 +108,6 @@ def saferepr( This function is a wrapper around the Repr/reprlib functionality of the stdlib. """ - return SafeRepr(maxsize, use_ascii).repr(obj) @@ -132,49 +128,3 @@ def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str: return repr(obj) except Exception as exc: return _format_repr_exception(exc, obj) - - -class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter): - """PrettyPrinter that always dispatches (regardless of width).""" - - def _format( - self, - object: object, - stream: IO[str], - indent: int, - allowance: int, - context: Dict[int, Any], - level: int, - ) -> None: - # Type ignored because _dispatch is private. 
- p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined] - - objid = id(object) - if objid in context or p is None: - # Type ignored because _format is private. - super()._format( # type: ignore[misc] - object, - stream, - indent, - allowance, - context, - level, - ) - return - - context[objid] = 1 - p(self, object, stream, indent, allowance, context, level + 1) - del context[objid] - - -def _pformat_dispatch( - object: object, - indent: int = 1, - width: int = 80, - depth: Optional[int] = None, - *, - compact: bool = False, -) -> str: - return AlwaysDispatchingPrettyPrinter( - indent=indent, width=width, depth=depth, compact=compact - ).pformat(object) diff --git a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py b/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py index 379035d858c..16449b780c7 100644 --- a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py +++ b/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py @@ -1,13 +1,15 @@ """Helper functions for writing to terminals and files.""" + import os import shutil import sys +from typing import final +from typing import Literal from typing import Optional from typing import Sequence from typing import TextIO from .wcwidth import wcswidth -from _pytest.compat import final # This code was initially copied from py 1.8.1, file _io/terminalwriter.py. @@ -28,9 +30,9 @@ def should_do_markup(file: TextIO) -> bool: return True if os.environ.get("PY_COLORS") == "0": return False - if "NO_COLOR" in os.environ: + if os.environ.get("NO_COLOR"): return False - if "FORCE_COLOR" in os.environ: + if os.environ.get("FORCE_COLOR"): return True return ( hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" @@ -182,9 +184,7 @@ class TerminalWriter: """ if indents and len(indents) != len(lines): raise ValueError( - "indents size ({}) should have same size as lines ({})".format( - len(indents), len(lines) - ) + f"indents size ({len(indents)}) should have same size as lines ({len(lines)})" ) if not indents: indents = [""] * len(lines) @@ -193,15 +193,22 @@ class TerminalWriter: for indent, new_line in zip(indents, new_lines): self.line(indent + new_line) - def _highlight(self, source: str) -> str: - """Highlight the given source code if we have markup support.""" + def _highlight( + self, source: str, lexer: Literal["diff", "python"] = "python" + ) -> str: + """Highlight the given source if we have markup support.""" from _pytest.config.exceptions import UsageError - if not self.hasmarkup or not self.code_highlight: + if not source or not self.hasmarkup or not self.code_highlight: return source + try: from pygments.formatters.terminal import TerminalFormatter - from pygments.lexers.python import PythonLexer + + if lexer == "python": + from pygments.lexers.python import PythonLexer as Lexer + elif lexer == "diff": + from pygments.lexers.diff import DiffLexer as Lexer from pygments import highlight import pygments.util except ImportError: @@ -210,13 +217,21 @@ class TerminalWriter: try: highlighted: str = highlight( source, - PythonLexer(), + Lexer(), TerminalFormatter( bg=os.getenv("PYTEST_THEME_MODE", "dark"), style=os.getenv("PYTEST_THEME"), ), ) - return highlighted + # pygments terminal formatter may add a newline when there wasn't one. + # We don't want this, remove. 
+ if highlighted[-1] == "\n" and source[-1] != "\n": + highlighted = highlighted[:-1] + + # Some lexers will not set the initial color explicitly + # which may lead to the previous color being propagated to the + # start of the expression, so reset first. + return "\x1b[0m" + highlighted except pygments.util.ClassNotFound: raise UsageError( "PYTEST_THEME environment variable had an invalid value: '{}'. " diff --git a/contrib/python/pytest/py3/_pytest/_io/wcwidth.py b/contrib/python/pytest/py3/_pytest/_io/wcwidth.py index e5c7bf4d868..53803133519 100644 --- a/contrib/python/pytest/py3/_pytest/_io/wcwidth.py +++ b/contrib/python/pytest/py3/_pytest/_io/wcwidth.py @@ -1,5 +1,5 @@ -import unicodedata from functools import lru_cache +import unicodedata @lru_cache(100) diff --git a/contrib/python/pytest/py3/_pytest/_py/error.py b/contrib/python/pytest/py3/_pytest/_py/error.py index 0b8f2d535ef..68f1eed7ec0 100644 --- a/contrib/python/pytest/py3/_pytest/_py/error.py +++ b/contrib/python/pytest/py3/_pytest/_py/error.py @@ -1,4 +1,5 @@ """create errno-specific classes for IO or os calls.""" + from __future__ import annotations import errno @@ -8,6 +9,7 @@ from typing import Callable from typing import TYPE_CHECKING from typing import TypeVar + if TYPE_CHECKING: from typing_extensions import ParamSpec diff --git a/contrib/python/pytest/py3/_pytest/_py/path.py b/contrib/python/pytest/py3/_pytest/_py/path.py index 73a070d19a8..232be617ae5 100644 --- a/contrib/python/pytest/py3/_pytest/_py/path.py +++ b/contrib/python/pytest/py3/_pytest/_py/path.py @@ -1,16 +1,13 @@ """local path implementation.""" + from __future__ import annotations import atexit +from contextlib import contextmanager import fnmatch import importlib.util import io import os -import posixpath -import sys -import uuid -import warnings -from contextlib import contextmanager from os.path import abspath from os.path import dirname from os.path import exists @@ -19,19 +16,22 @@ from os.path import isdir from os.path import isfile from os.path import islink from os.path import normpath +import posixpath from stat import S_ISDIR from stat import S_ISLNK from stat import S_ISREG +import sys from typing import Any from typing import Callable from typing import cast +from typing import Literal from typing import overload from typing import TYPE_CHECKING +import uuid +import warnings from . import error -if TYPE_CHECKING: - from typing import Literal # Moved from local.py. iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") @@ -677,7 +677,7 @@ class LocalPath: else: kw.setdefault("dirname", dirname) kw.setdefault("sep", self.sep) - obj.strpath = normpath("%(dirname)s%(sep)s%(basename)s" % kw) + obj.strpath = normpath("{dirname}{sep}{basename}".format(**kw)) return obj def _getbyspec(self, spec: str) -> list[str]: @@ -757,7 +757,16 @@ class LocalPath: if ensure: self.dirpath().ensure(dir=1) if encoding: - return error.checked_call(io.open, self.strpath, mode, encoding=encoding) + # Using type ignore here because of this error: + # error: Argument 1 has incompatible type overloaded function; + # expected "Callable[[str, Any, Any], TextIOWrapper]" [arg-type] + # Which seems incorrect, given io.open supports the given argument types. 
+ return error.checked_call( + io.open, + self.strpath, + mode, + encoding=encoding, # type:ignore[arg-type] + ) return error.checked_call(open, self.strpath, mode) def _fastjoin(self, name): @@ -775,11 +784,11 @@ class LocalPath: valid checkers:: - file=1 # is a file - file=0 # is not a file (may not even exist) - dir=1 # is a dir - link=1 # is a link - exists=1 # exists + file = 1 # is a file + file = 0 # is not a file (may not even exist) + dir = 1 # is a dir + link = 1 # is a link + exists = 1 # exists You can specify multiple checker definitions, for example:: @@ -1163,7 +1172,8 @@ class LocalPath: where the 'self' path points to executable. The process is directly invoked and not through a system shell. """ - from subprocess import Popen, PIPE + from subprocess import PIPE + from subprocess import Popen popen_opts.pop("stdout", None) popen_opts.pop("stderr", None) @@ -1263,13 +1273,20 @@ class LocalPath: @classmethod def mkdtemp(cls, rootdir=None): """Return a Path object pointing to a fresh new temporary directory - (which we created ourself). + (which we created ourselves). """ import tempfile if rootdir is None: rootdir = cls.get_temproot() - return cls(error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) + # Using type ignore here because of this error: + # error: Argument 1 has incompatible type overloaded function; expected "Callable[[str], str]" [arg-type] + # Which seems incorrect, given tempfile.mkdtemp supports the given argument types. + path = error.checked_call( + tempfile.mkdtemp, + dir=str(rootdir), # type:ignore[arg-type] + ) + return cls(path) @classmethod def make_numbered_dir( diff --git a/contrib/python/pytest/py3/_pytest/_version.py b/contrib/python/pytest/py3/_pytest/_version.py index 458d0659289..01eb2b67c95 100644 --- a/contrib/python/pytest/py3/_pytest/_version.py +++ b/contrib/python/pytest/py3/_pytest/_version.py @@ -12,5 +12,5 @@ __version__: str __version_tuple__: VERSION_TUPLE version_tuple: VERSION_TUPLE -__version__ = version = '7.4.4' -__version_tuple__ = version_tuple = (7, 4, 4) +__version__ = version = '8.0.2' +__version_tuple__ = version_tuple = (8, 0, 2) diff --git a/contrib/python/pytest/py3/_pytest/assertion/__init__.py b/contrib/python/pytest/py3/_pytest/assertion/__init__.py index a46e58136ba..2bce0ec7cb5 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/__init__.py +++ b/contrib/python/pytest/py3/_pytest/assertion/__init__.py @@ -1,4 +1,5 @@ """Support for presenting detailed information in failing assertions.""" + import sys from typing import Any from typing import Generator @@ -15,6 +16,7 @@ from _pytest.config import hookimpl from _pytest.config.argparsing import Parser from _pytest.nodes import Item + if TYPE_CHECKING: from _pytest.main import Session @@ -42,6 +44,14 @@ def pytest_addoption(parser: Parser) -> None: help="Enables the pytest_assertion_pass hook. " "Make sure to delete any previously generated pyc cache files.", ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_ASSERTIONS, + help=( + "Specify a verbosity level for assertions, overriding the main level. " + "Higher levels will provide more detailed explanation when an assertion fails." 
+ ), + ) def register_assert_rewrite(*names: str) -> None: @@ -112,15 +122,14 @@ def pytest_collection(session: "Session") -> None: assertstate.hook.set_session(session) -@hookimpl(tryfirst=True, hookwrapper=True) -def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. The rewrite module will use util._reprcompare if it exists to use custom reporting via the pytest_assertrepr_compare hook. This sets up this custom comparison for the test. """ - ihook = item.ihook def callbinrepr(op, left: object, right: object) -> Optional[str]: @@ -162,10 +171,11 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: util._assertion_pass = call_assertion_pass_hook - yield - - util._reprcompare, util._assertion_pass = saved_assert_hooks - util._config = None + try: + return (yield) + finally: + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None def pytest_sessionfinish(session: "Session") -> None: diff --git a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py b/contrib/python/pytest/py3/_pytest/assertion/rewrite.py index d1974bb3b4a..0ab6eaa1393 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py +++ b/contrib/python/pytest/py3/_pytest/assertion/rewrite.py @@ -1,5 +1,7 @@ """Rewrite assertion AST to produce nice error messages.""" + import ast +from collections import defaultdict import errno import functools import importlib.abc @@ -9,13 +11,12 @@ import io import itertools import marshal import os +from pathlib import Path +from pathlib import PurePath import struct import sys import tokenize import types -from collections import defaultdict -from pathlib import Path -from pathlib import PurePath from typing import Callable from typing import Dict from typing import IO @@ -33,29 +34,20 @@ from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE from _pytest._io.saferepr import saferepr from _pytest._version import version from _pytest.assertion import util -from _pytest.assertion.util import ( # noqa: F401 - format_explanation as _format_explanation, -) from _pytest.config import Config from _pytest.main import Session from _pytest.pathlib import absolutepath from _pytest.pathlib import fnmatch_ex from _pytest.stash import StashKey + +# fmt: off +from _pytest.assertion.util import format_explanation as _format_explanation # noqa:F401, isort:skip +# fmt:on + if TYPE_CHECKING: from _pytest.assertion import AssertionState -if sys.version_info >= (3, 8): - namedExpr = ast.NamedExpr - astNameConstant = ast.Constant - astStr = ast.Constant - astNum = ast.Constant -else: - namedExpr = ast.Expr - astNameConstant = ast.NameConstant - astStr = ast.Str - astNum = ast.Num - class Sentinel: pass @@ -437,7 +429,10 @@ def _saferepr(obj: object) -> str: def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]: """Get `maxsize` configuration for saferepr based on the given config object.""" - verbosity = config.getoption("verbose") if config is not None else 0 + if config is None: + verbosity = 0 + else: + verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) if verbosity >= 2: return None if verbosity >= 1: @@ -604,13 +599,6 @@ def _get_assertion_exprs(src: bytes) -> Dict[int, str]: return ret -def _get_ast_constant_value(value: astStr) -> object: - if sys.version_info >= (3, 8): - return value.value - else: - return value.s 
- - class AssertionRewriter(ast.NodeVisitor): """Assertion rewriting implementation. @@ -706,11 +694,10 @@ class AssertionRewriter(ast.NodeVisitor): if ( expect_docstring and isinstance(item, ast.Expr) - and isinstance(item.value, astStr) - and isinstance(_get_ast_constant_value(item.value), str) + and isinstance(item.value, ast.Constant) + and isinstance(item.value.value, str) ): - doc = _get_ast_constant_value(item.value) - assert isinstance(doc, str) + doc = item.value.value if self.is_rewrite_disabled(doc): return expect_docstring = False @@ -850,7 +837,7 @@ class AssertionRewriter(ast.NodeVisitor): current = self.stack.pop() if self.stack: self.explanation_specifiers = self.stack[-1] - keys = [astStr(key) for key in current.keys()] + keys = [ast.Constant(key) for key in current.keys()] format_dict = ast.Dict(keys, list(current.values())) form = ast.BinOp(expl_expr, ast.Mod(), format_dict) name = "@py_format" + str(next(self.variable_counter)) @@ -874,9 +861,10 @@ class AssertionRewriter(ast.NodeVisitor): the expression is false. """ if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: - from _pytest.warning_types import PytestAssertRewriteWarning import warnings + from _pytest.warning_types import PytestAssertRewriteWarning + # TODO: This assert should not be needed. assert self.module_path is not None warnings.warn_explicit( @@ -904,16 +892,16 @@ class AssertionRewriter(ast.NodeVisitor): negation = ast.UnaryOp(ast.Not(), top_condition) if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook - msg = self.pop_format_context(astStr(explanation)) + msg = self.pop_format_context(ast.Constant(explanation)) # Failed if assert_.msg: assertmsg = self.helper("_format_assertmsg", assert_.msg) gluestr = "\n>assert " else: - assertmsg = astStr("") + assertmsg = ast.Constant("") gluestr = "assert " - err_explanation = ast.BinOp(astStr(gluestr), ast.Add(), msg) + err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg) err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) err_name = ast.Name("AssertionError", ast.Load()) fmt = self.helper("_format_explanation", err_msg) @@ -929,8 +917,8 @@ class AssertionRewriter(ast.NodeVisitor): hook_call_pass = ast.Expr( self.helper( "_call_assertion_pass", - astNum(assert_.lineno), - astStr(orig), + ast.Constant(assert_.lineno), + ast.Constant(orig), fmt_pass, ) ) @@ -949,7 +937,7 @@ class AssertionRewriter(ast.NodeVisitor): variables = [ ast.Name(name, ast.Store()) for name in self.format_variables ] - clear_format = ast.Assign(variables, astNameConstant(None)) + clear_format = ast.Assign(variables, ast.Constant(None)) self.statements.append(clear_format) else: # Original assertion rewriting @@ -960,9 +948,9 @@ class AssertionRewriter(ast.NodeVisitor): assertmsg = self.helper("_format_assertmsg", assert_.msg) explanation = "\n>assert " + explanation else: - assertmsg = astStr("") + assertmsg = ast.Constant("") explanation = "assert " + explanation - template = ast.BinOp(assertmsg, ast.Add(), astStr(explanation)) + template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation)) msg = self.pop_format_context(template) fmt = self.helper("_format_explanation", msg) err_name = ast.Name("AssertionError", ast.Load()) @@ -974,7 +962,7 @@ class AssertionRewriter(ast.NodeVisitor): # Clear temporary variables by setting them to None. 
if self.variables: variables = [ast.Name(name, ast.Store()) for name in self.variables] - clear = ast.Assign(variables, astNameConstant(None)) + clear = ast.Assign(variables, ast.Constant(None)) self.statements.append(clear) # Fix locations (line numbers/column offsets). for stmt in self.statements: @@ -982,26 +970,26 @@ class AssertionRewriter(ast.NodeVisitor): ast.copy_location(node, assert_) return self.statements - def visit_NamedExpr(self, name: namedExpr) -> Tuple[namedExpr, str]: + def visit_NamedExpr(self, name: ast.NamedExpr) -> Tuple[ast.NamedExpr, str]: # This method handles the 'walrus operator' repr of the target # name if it's a local variable or _should_repr_global_name() # thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], []) target_id = name.target.id # type: ignore[attr-defined] - inlocs = ast.Compare(astStr(target_id), [ast.In()], [locs]) + inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs]) dorepr = self.helper("_should_repr_global_name", name) test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) - expr = ast.IfExp(test, self.display(name), astStr(target_id)) + expr = ast.IfExp(test, self.display(name), ast.Constant(target_id)) return name, self.explanation_param(expr) def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]: # Display the repr of the name if it's a local variable or # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], []) - inlocs = ast.Compare(astStr(name.id), [ast.In()], [locs]) + inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs]) dorepr = self.helper("_should_repr_global_name", name) test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) - expr = ast.IfExp(test, self.display(name), astStr(name.id)) + expr = ast.IfExp(test, self.display(name), ast.Constant(name.id)) return name, self.explanation_param(expr) def visit_BoolOp(self, boolop: ast.BoolOp) -> Tuple[ast.Name, str]: @@ -1020,10 +1008,10 @@ class AssertionRewriter(ast.NodeVisitor): # cond is set in a prior loop iteration below self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa self.expl_stmts = fail_inner - # Check if the left operand is a namedExpr and the value has already been visited + # Check if the left operand is a ast.NamedExpr and the value has already been visited if ( isinstance(v, ast.Compare) - and isinstance(v.left, namedExpr) + and isinstance(v.left, ast.NamedExpr) and v.left.target.id in [ ast_expr.id @@ -1032,14 +1020,12 @@ class AssertionRewriter(ast.NodeVisitor): ] ): pytest_temp = self.variable() - self.variables_overwrite[self.scope][ - v.left.target.id - ] = v.left # type:ignore[assignment] + self.variables_overwrite[self.scope][v.left.target.id] = v.left # type:ignore[assignment] v.left.target.id = pytest_temp self.push_format_context() res, expl = self.visit(v) body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) - expl_format = self.pop_format_context(astStr(expl)) + expl_format = self.pop_format_context(ast.Constant(expl)) call = ast.Call(app, [expl_format], []) self.expl_stmts.append(ast.Expr(call)) if i < levels: @@ -1051,7 +1037,7 @@ class AssertionRewriter(ast.NodeVisitor): self.statements = body = inner self.statements = save self.expl_stmts = fail_save - expl_template = self.helper("_format_boolop", expl_list, astNum(is_or)) + expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or)) expl = self.pop_format_context(expl_template) return ast.Name(res_var, ast.Load()), self.explanation_param(expl) @@ -1078,9 +1064,7 @@ class 
AssertionRewriter(ast.NodeVisitor): if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( self.scope, {} ): - arg = self.variables_overwrite[self.scope][ - arg.id - ] # type:ignore[assignment] + arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment] res, expl = self.visit(arg) arg_expls.append(expl) new_args.append(res) @@ -1088,9 +1072,7 @@ class AssertionRewriter(ast.NodeVisitor): if isinstance( keyword.value, ast.Name ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): - keyword.value = self.variables_overwrite[self.scope][ - keyword.value.id - ] # type:ignore[assignment] + keyword.value = self.variables_overwrite[self.scope][keyword.value.id] # type:ignore[assignment] res, expl = self.visit(keyword.value) new_kwargs.append(ast.keyword(keyword.arg, res)) if keyword.arg: @@ -1127,13 +1109,9 @@ class AssertionRewriter(ast.NodeVisitor): if isinstance( comp.left, ast.Name ) and comp.left.id in self.variables_overwrite.get(self.scope, {}): - comp.left = self.variables_overwrite[self.scope][ - comp.left.id - ] # type:ignore[assignment] - if isinstance(comp.left, namedExpr): - self.variables_overwrite[self.scope][ - comp.left.target.id - ] = comp.left # type:ignore[assignment] + comp.left = self.variables_overwrite[self.scope][comp.left.id] # type:ignore[assignment] + if isinstance(comp.left, ast.NamedExpr): + self.variables_overwrite[self.scope][comp.left.target.id] = comp.left # type:ignore[assignment] left_res, left_expl = self.visit(comp.left) if isinstance(comp.left, (ast.Compare, ast.BoolOp)): left_expl = f"({left_expl})" @@ -1146,22 +1124,20 @@ class AssertionRewriter(ast.NodeVisitor): results = [left_res] for i, op, next_operand in it: if ( - isinstance(next_operand, namedExpr) + isinstance(next_operand, ast.NamedExpr) and isinstance(left_res, ast.Name) and next_operand.target.id == left_res.id ): next_operand.target.id = self.variable() - self.variables_overwrite[self.scope][ - left_res.id - ] = next_operand # type:ignore[assignment] + self.variables_overwrite[self.scope][left_res.id] = next_operand # type:ignore[assignment] next_res, next_expl = self.visit(next_operand) if isinstance(next_operand, (ast.Compare, ast.BoolOp)): next_expl = f"({next_expl})" results.append(next_res) sym = BINOP_MAP[op.__class__] - syms.append(astStr(sym)) + syms.append(ast.Constant(sym)) expl = f"{left_expl} {sym} {next_expl}" - expls.append(astStr(expl)) + expls.append(ast.Constant(expl)) res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl @@ -1205,7 +1181,7 @@ def try_makedirs(cache_dir: Path) -> bool: def get_cache_dir(file_path: Path) -> Path: """Return the cache directory to write .pyc files for the given .py file path.""" - if sys.version_info >= (3, 8) and sys.pycache_prefix: + if sys.pycache_prefix: # given: # prefix = '/tmp/pycs' # path = '/home/user/proj/test_app.py' diff --git a/contrib/python/pytest/py3/_pytest/assertion/truncate.py b/contrib/python/pytest/py3/_pytest/assertion/truncate.py index dfd6f65d281..902d4baf846 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/truncate.py +++ b/contrib/python/pytest/py3/_pytest/assertion/truncate.py @@ -1,12 +1,14 @@ """Utilities for truncating assertion output. Current default behaviour is to truncate assertion explanations at -~8 terminal lines, unless running in "-vv" mode or running on CI. 
+terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI. """ + from typing import List from typing import Optional from _pytest.assertion import util +from _pytest.config import Config from _pytest.nodes import Item @@ -26,7 +28,7 @@ def truncate_if_required( def _should_truncate_item(item: Item) -> bool: """Whether or not this test item is eligible for truncation.""" - verbose = item.config.option.verbose + verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) return verbose < 2 and not util.running_on_ci() diff --git a/contrib/python/pytest/py3/_pytest/assertion/util.py b/contrib/python/pytest/py3/_pytest/assertion/util.py index 39ca5403e04..a7074115d65 100644 --- a/contrib/python/pytest/py3/_pytest/assertion/util.py +++ b/contrib/python/pytest/py3/_pytest/assertion/util.py @@ -1,4 +1,5 @@ """Utilities for assertion debugging.""" + import collections.abc import os import pprint @@ -7,18 +8,21 @@ from typing import Any from typing import Callable from typing import Iterable from typing import List +from typing import Literal from typing import Mapping from typing import Optional +from typing import Protocol from typing import Sequence from unicodedata import normalize -import _pytest._code from _pytest import outcomes -from _pytest._io.saferepr import _pformat_dispatch +import _pytest._code +from _pytest._io.pprint import PrettyPrinter from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr_unlimited from _pytest.config import Config + # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was # loaded and in turn call the hooks defined here as part of the @@ -33,6 +37,11 @@ _assertion_pass: Optional[Callable[[int, str, str], None]] = None _config: Optional[Config] = None +class _HighlightFunc(Protocol): + def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str: + """Apply highlighting to the given source.""" + + def format_explanation(explanation: str) -> str: r"""Format an explanation. @@ -161,7 +170,7 @@ def assertrepr_compare( config, op: str, left: Any, right: Any, use_ascii: bool = False ) -> Optional[List[str]]: """Return specialised explanations for some operators/operands.""" - verbose = config.getoption("verbose") + verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) # Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier. # See issue #3246. 
@@ -185,14 +194,31 @@ def assertrepr_compare( right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii) summary = f"{left_repr} {op} {right_repr}" + highlighter = config.get_terminal_writer()._highlight explanation = None try: if op == "==": - explanation = _compare_eq_any(left, right, verbose) + explanation = _compare_eq_any(left, right, highlighter, verbose) elif op == "not in": if istext(left) and istext(right): explanation = _notin_text(left, right, verbose) + elif op == "!=": + if isset(left) and isset(right): + explanation = ["Both sets are equal"] + elif op == ">=": + if isset(left) and isset(right): + explanation = _compare_gte_set(left, right, highlighter, verbose) + elif op == "<=": + if isset(left) and isset(right): + explanation = _compare_lte_set(left, right, highlighter, verbose) + elif op == ">": + if isset(left) and isset(right): + explanation = _compare_gt_set(left, right, highlighter, verbose) + elif op == "<": + if isset(left) and isset(right): + explanation = _compare_lt_set(left, right, highlighter, verbose) + except outcomes.Exit: raise except Exception: @@ -206,10 +232,14 @@ def assertrepr_compare( if not explanation: return None + if explanation[0] != "": + explanation = [""] + explanation return [summary] + explanation -def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: +def _compare_eq_any( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0 +) -> List[str]: explanation = [] if istext(left) and istext(right): explanation = _diff_text(left, right, verbose) @@ -222,23 +252,23 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: other_side = right if isinstance(left, ApproxBase) else left explanation = approx_side._repr_compare(other_side) - elif type(left) == type(right) and ( + elif type(left) is type(right) and ( isdatacls(left) or isattrs(left) or isnamedtuple(left) ): # Note: unlike dataclasses/attrs, namedtuples compare only the # field values, not the type or field names. But this branch # intentionally only handles the same-type case, which was often # used in older code bases before dataclasses/attrs were available. 
- explanation = _compare_eq_cls(left, right, verbose) + explanation = _compare_eq_cls(left, right, highlighter, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right, verbose) + explanation = _compare_eq_sequence(left, right, highlighter, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right, verbose) + explanation = _compare_eq_set(left, right, highlighter, verbose) elif isdict(left) and isdict(right): - explanation = _compare_eq_dict(left, right, verbose) + explanation = _compare_eq_dict(left, right, highlighter, verbose) if isiterable(left) and isiterable(right): - expl = _compare_eq_iterable(left, right, verbose) + expl = _compare_eq_iterable(left, right, highlighter, verbose) explanation.extend(expl) return explanation @@ -273,8 +303,8 @@ def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]: if i > 42: i -= 10 # Provide some context explanation += [ - "Skipping {} identical trailing " - "characters in diff, use -v to show".format(i) + f"Skipping {i} identical trailing " + "characters in diff, use -v to show" ] left = left[:-i] right = right[:-i] @@ -292,51 +322,40 @@ def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]: return explanation -def _surrounding_parens_on_own_lines(lines: List[str]) -> None: - """Move opening/closing parenthesis/bracket to own lines.""" - opening = lines[0][:1] - if opening in ["(", "[", "{"]: - lines[0] = " " + lines[0][1:] - lines[:] = [opening] + lines - closing = lines[-1][-1:] - if closing in [")", "]", "}"]: - lines[-1] = lines[-1][:-1] + "," - lines[:] = lines + [closing] - - def _compare_eq_iterable( - left: Iterable[Any], right: Iterable[Any], verbose: int = 0 + left: Iterable[Any], + right: Iterable[Any], + highligher: _HighlightFunc, + verbose: int = 0, ) -> List[str]: if verbose <= 0 and not running_on_ci(): return ["Use -v to get more diff"] # dynamic import to speedup pytest import difflib - left_formatting = pprint.pformat(left).splitlines() - right_formatting = pprint.pformat(right).splitlines() + left_formatting = PrettyPrinter().pformat(left).splitlines() + right_formatting = PrettyPrinter().pformat(right).splitlines() - # Re-format for different output lengths. 
- lines_left = len(left_formatting) - lines_right = len(right_formatting) - if lines_left != lines_right: - left_formatting = _pformat_dispatch(left).splitlines() - right_formatting = _pformat_dispatch(right).splitlines() - - if lines_left > 1 or lines_right > 1: - _surrounding_parens_on_own_lines(left_formatting) - _surrounding_parens_on_own_lines(right_formatting) - - explanation = ["Full diff:"] + explanation = ["", "Full diff:"] # "right" is the expected base against which we compare "left", # see https://github.com/pytest-dev/pytest/issues/3333 explanation.extend( - line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting) + highligher( + "\n".join( + line.rstrip() + for line in difflib.ndiff(right_formatting, left_formatting) + ), + lexer="diff", + ).splitlines() ) return explanation def _compare_eq_sequence( - left: Sequence[Any], right: Sequence[Any], verbose: int = 0 + left: Sequence[Any], + right: Sequence[Any], + highlighter: _HighlightFunc, + verbose: int = 0, ) -> List[str]: comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) explanation: List[str] = [] @@ -359,7 +378,10 @@ def _compare_eq_sequence( left_value = left[i] right_value = right[i] - explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"] + explanation.append( + f"At index {i} diff:" + f" {highlighter(repr(left_value))} != {highlighter(repr(right_value))}" + ) break if comparing_bytes: @@ -379,34 +401,91 @@ def _compare_eq_sequence( extra = saferepr(right[len_left]) if len_diff == 1: - explanation += [f"{dir_with_more} contains one more item: {extra}"] + explanation += [ + f"{dir_with_more} contains one more item: {highlighter(extra)}" + ] else: explanation += [ "%s contains %d more items, first extra item: %s" - % (dir_with_more, len_diff, extra) + % (dir_with_more, len_diff, highlighter(extra)) ] return explanation def _compare_eq_set( - left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0 + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> List[str]: + explanation = [] + explanation.extend(_set_one_sided_diff("left", left, right, highlighter)) + explanation.extend(_set_one_sided_diff("right", right, left, highlighter)) + return explanation + + +def _compare_gt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> List[str]: + explanation = _compare_gte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_lt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> List[str]: + explanation = _compare_lte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_gte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> List[str]: + return _set_one_sided_diff("right", right, left, highlighter) + + +def _compare_lte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> List[str]: + return _set_one_sided_diff("left", left, right, highlighter) + + +def _set_one_sided_diff( + posn: str, + set1: AbstractSet[Any], + set2: AbstractSet[Any], + highlighter: _HighlightFunc, ) -> List[str]: explanation = [] - diff_left = left - right - diff_right = right - left - if diff_left: - explanation.append("Extra items in the left set:") - for 
item in diff_left: - explanation.append(saferepr(item)) - if diff_right: - explanation.append("Extra items in the right set:") - for item in diff_right: - explanation.append(saferepr(item)) + diff = set1 - set2 + if diff: + explanation.append(f"Extra items in the {posn} set:") + for item in diff: + explanation.append(highlighter(saferepr(item))) return explanation def _compare_eq_dict( - left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0 + left: Mapping[Any, Any], + right: Mapping[Any, Any], + highlighter: _HighlightFunc, + verbose: int = 0, ) -> List[str]: explanation: List[str] = [] set_left = set(left) @@ -417,12 +496,16 @@ def _compare_eq_dict( explanation += ["Omitting %s identical items, use -vv to show" % len(same)] elif same: explanation += ["Common items:"] - explanation += pprint.pformat(same).splitlines() + explanation += highlighter(pprint.pformat(same)).splitlines() diff = {k for k in common if left[k] != right[k]} if diff: explanation += ["Differing items:"] for k in diff: - explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] + explanation += [ + highlighter(saferepr({k: left[k]})) + + " != " + + highlighter(saferepr({k: right[k]})) + ] extra_left = set_left - set_right len_extra_left = len(extra_left) if len_extra_left: @@ -431,7 +514,7 @@ def _compare_eq_dict( % (len_extra_left, "" if len_extra_left == 1 else "s") ) explanation.extend( - pprint.pformat({k: left[k] for k in extra_left}).splitlines() + highlighter(pprint.pformat({k: left[k] for k in extra_left})).splitlines() ) extra_right = set_right - set_left len_extra_right = len(extra_right) @@ -441,12 +524,14 @@ def _compare_eq_dict( % (len_extra_right, "" if len_extra_right == 1 else "s") ) explanation.extend( - pprint.pformat({k: right[k] for k in extra_right}).splitlines() + highlighter(pprint.pformat({k: right[k] for k in extra_right})).splitlines() ) return explanation -def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]: +def _compare_eq_cls( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int +) -> List[str]: if not has_default_eq(left): return [] if isdatacls(left): @@ -478,21 +563,23 @@ def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]: explanation.append("Omitting %s identical items, use -vv to show" % len(same)) elif same: explanation += ["Matching attributes:"] - explanation += pprint.pformat(same).splitlines() + explanation += highlighter(pprint.pformat(same)).splitlines() if diff: explanation += ["Differing attributes:"] - explanation += pprint.pformat(diff).splitlines() + explanation += highlighter(pprint.pformat(diff)).splitlines() for field in diff: field_left = getattr(left, field) field_right = getattr(right, field) explanation += [ "", - "Drill down into differing attribute %s:" % field, - ("%s%s: %r != %r") % (indent, field, field_left, field_right), + f"Drill down into differing attribute {field}:", + f"{indent}{field}: {highlighter(repr(field_left))} != {highlighter(repr(field_right))}", ] explanation += [ indent + line - for line in _compare_eq_any(field_left, field_right, verbose) + for line in _compare_eq_any( + field_left, field_right, highlighter, verbose + ) ] return explanation diff --git a/contrib/python/pytest/py3/_pytest/cacheprovider.py b/contrib/python/pytest/py3/_pytest/cacheprovider.py index 1ecb8650580..49e38ed533f 100644 --- a/contrib/python/pytest/py3/_pytest/cacheprovider.py +++ b/contrib/python/pytest/py3/_pytest/cacheprovider.py @@ -1,4 +1,5 @@ """Implementation of the cache 
provider.""" + # This plugin was not named "cache" to avoid conflicts with the external # pytest-cache version. import dataclasses @@ -6,6 +7,7 @@ import json import os from pathlib import Path from typing import Dict +from typing import final from typing import Generator from typing import Iterable from typing import List @@ -18,7 +20,6 @@ from .pathlib import rm_rf from .reports import CollectReport from _pytest import nodes from _pytest._io import TerminalWriter -from _pytest.compat import final from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config import hookimpl @@ -27,10 +28,11 @@ from _pytest.deprecated import check_ispytest from _pytest.fixtures import fixture from _pytest.fixtures import FixtureRequest from _pytest.main import Session +from _pytest.nodes import Directory from _pytest.nodes import File -from _pytest.python import Package from _pytest.reports import TestReport + README_CONTENT = """\ # pytest cache directory # @@ -111,6 +113,7 @@ class Cache: """ check_ispytest(_ispytest) import warnings + from _pytest.warning_types import PytestCacheWarning warnings.warn( @@ -217,42 +220,34 @@ class LFPluginCollWrapper: self.lfplugin = lfplugin self._collected_at_least_one_failure = False - @hookimpl(hookwrapper=True) - def pytest_make_collect_report(self, collector: nodes.Collector): - if isinstance(collector, (Session, Package)): - out = yield - res: CollectReport = out.get_result() - + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Generator[None, CollectReport, CollectReport]: + res = yield + if isinstance(collector, (Session, Directory)): # Sort any lf-paths to the beginning. lf_paths = self.lfplugin._last_failed_paths # Use stable sort to priorize last failed. def sort_key(node: Union[nodes.Item, nodes.Collector]) -> bool: - # Package.path is the __init__.py file, we need the directory. - if isinstance(node, Package): - path = node.path.parent - else: - path = node.path - return path in lf_paths + return node.path in lf_paths res.result = sorted( res.result, key=sort_key, reverse=True, ) - return elif isinstance(collector, File): if collector.path in self.lfplugin._last_failed_paths: - out = yield - res = out.get_result() result = res.result lastfailed = self.lfplugin.lastfailed # Only filter with known failures. if not self._collected_at_least_one_failure: if not any(x.nodeid in lastfailed for x in result): - return + return res self.lfplugin.config.pluginmanager.register( LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" ) @@ -268,8 +263,8 @@ class LFPluginCollWrapper: # Keep all sub-collectors. or isinstance(x, nodes.Collector) ] - return - yield + + return res class LFPluginCollSkipfiles: @@ -280,9 +275,7 @@ class LFPluginCollSkipfiles: def pytest_make_collect_report( self, collector: nodes.Collector ) -> Optional[CollectReport]: - # Packages are Files, but we only want to skip test-bearing Files, - # so don't filter Packages. 
- if isinstance(collector, File) and not isinstance(collector, Package): + if isinstance(collector, File): if collector.path not in self.lfplugin._last_failed_paths: self.lfplugin._skipped_files += 1 @@ -342,14 +335,14 @@ class LFPlugin: else: self.lastfailed[report.nodeid] = True - @hookimpl(hookwrapper=True, tryfirst=True) + @hookimpl(wrapper=True, tryfirst=True) def pytest_collection_modifyitems( self, config: Config, items: List[nodes.Item] ) -> Generator[None, None, None]: - yield + res = yield if not self.active: - return + return res if self.lastfailed: previously_failed = [] @@ -376,15 +369,13 @@ class LFPlugin: noun = "failure" if self._previously_failed_count == 1 else "failures" suffix = " first" if self.config.getoption("failedfirst") else "" - self._report_status = "rerun previous {count} {noun}{suffix}".format( - count=self._previously_failed_count, suffix=suffix, noun=noun + self._report_status = ( + f"rerun previous {self._previously_failed_count} {noun}{suffix}" ) if self._skipped_files > 0: files_noun = "file" if self._skipped_files == 1 else "files" - self._report_status += " (skipped {files} {files_noun})".format( - files=self._skipped_files, files_noun=files_noun - ) + self._report_status += f" (skipped {self._skipped_files} {files_noun})" else: self._report_status = "no previously failed tests, " if self.config.getoption("last_failed_no_failures") == "none": @@ -394,6 +385,8 @@ class LFPlugin: else: self._report_status += "not deselecting items." + return res + def pytest_sessionfinish(self, session: Session) -> None: config = self.config if config.getoption("cacheshow") or hasattr(config, "workerinput"): @@ -414,11 +407,11 @@ class NFPlugin: assert config.cache is not None self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) - @hookimpl(hookwrapper=True, tryfirst=True) + @hookimpl(wrapper=True, tryfirst=True) def pytest_collection_modifyitems( self, items: List[nodes.Item] ) -> Generator[None, None, None]: - yield + res = yield if self.active: new_items: Dict[str, nodes.Item] = {} @@ -436,6 +429,8 @@ class NFPlugin: else: self.cached_nodeids.update(item.nodeid for item in items) + return res + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]: return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return] diff --git a/contrib/python/pytest/py3/_pytest/capture.py b/contrib/python/pytest/py3/_pytest/capture.py index a8ca0869f33..d4cabedba29 100644 --- a/contrib/python/pytest/py3/_pytest/capture.py +++ b/contrib/python/pytest/py3/_pytest/capture.py @@ -1,21 +1,25 @@ """Per-test stdout/stderr capturing mechanism.""" + import abc import collections import contextlib import io +from io import UnsupportedOperation import os import sys -from io import UnsupportedOperation from tempfile import TemporaryFile from types import TracebackType from typing import Any from typing import AnyStr from typing import BinaryIO +from typing import Final +from typing import final from typing import Generator from typing import Generic from typing import Iterable from typing import Iterator from typing import List +from typing import Literal from typing import NamedTuple from typing import Optional from typing import TextIO @@ -24,7 +28,6 @@ from typing import Type from typing import TYPE_CHECKING from typing import Union -from _pytest.compat import final from _pytest.config import Config from _pytest.config import hookimpl from _pytest.config.argparsing import Parser @@ -34,12 +37,10 @@ from 
_pytest.fixtures import SubRequest from _pytest.nodes import Collector from _pytest.nodes import File from _pytest.nodes import Item +from _pytest.reports import CollectReport -if TYPE_CHECKING: - from typing_extensions import Final - from typing_extensions import Literal - _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] +_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] def pytest_addoption(parser: Parser) -> None: @@ -132,8 +133,8 @@ def _windowsconsoleio_workaround(stream: TextIO) -> None: sys.stderr = _reopen_stdio(sys.stderr, "wb") -@hookimpl(hookwrapper=True) -def pytest_load_initial_conftests(early_config: Config): +@hookimpl(wrapper=True) +def pytest_load_initial_conftests(early_config: Config) -> Generator[None, None, None]: ns = early_config.known_args_namespace if ns.capture == "fd": _windowsconsoleio_workaround(sys.stdout) @@ -147,12 +148,16 @@ def pytest_load_initial_conftests(early_config: Config): # Finally trigger conftest loading but while capturing (issue #93). capman.start_global_capturing() - outcome = yield - capman.suspend_global_capture() - if outcome.excinfo is not None: + try: + try: + yield + finally: + capman.suspend_global_capture() + except BaseException: out, err = capman.read_global_capture() sys.stdout.write(out) sys.stderr.write(err) + raise # IO Helpers. @@ -585,7 +590,7 @@ if sys.version_info >= (3, 11) or TYPE_CHECKING: @final class CaptureResult(NamedTuple, Generic[AnyStr]): - """The result of :method:`CaptureFixture.readouterr`.""" + """The result of :method:`caplog.readouterr() <pytest.CaptureFixture.readouterr>`.""" out: AnyStr err: AnyStr @@ -595,7 +600,7 @@ else: class CaptureResult( collections.namedtuple("CaptureResult", ["out", "err"]), Generic[AnyStr] ): - """The result of :method:`CaptureFixture.readouterr`.""" + """The result of :method:`caplog.readouterr() <pytest.CaptureFixture.readouterr>`.""" __slots__ = () @@ -687,7 +692,7 @@ class MultiCapture(Generic[AnyStr]): return CaptureResult(out, err) # type: ignore[arg-type] -def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]: +def _get_multicapture(method: _CaptureMethod) -> MultiCapture[str]: if method == "fd": return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2)) elif method == "sys": @@ -723,7 +728,7 @@ class CaptureManager: needed to ensure the fixtures take precedence over the global capture. 
""" - def __init__(self, method: "_CaptureMethod") -> None: + def __init__(self, method: _CaptureMethod) -> None: self._method: Final = method self._global_capturing: Optional[MultiCapture[str]] = None self._capture_fixture: Optional[CaptureFixture[Any]] = None @@ -786,9 +791,7 @@ class CaptureManager: current_fixture = self._capture_fixture.request.fixturename requested_fixture = capture_fixture.request.fixturename capture_fixture.request.raiseerror( - "cannot use {} and {} at the same time".format( - requested_fixture, current_fixture - ) + f"cannot use {requested_fixture} and {current_fixture} at the same time" ) self._capture_fixture = capture_fixture @@ -843,41 +846,45 @@ class CaptureManager: self.deactivate_fixture() self.suspend_global_capture(in_=False) - out, err = self.read_global_capture() - item.add_report_section(when, "stdout", out) - item.add_report_section(when, "stderr", err) + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) # Hooks - @hookimpl(hookwrapper=True) - def pytest_make_collect_report(self, collector: Collector): + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: Collector + ) -> Generator[None, CollectReport, CollectReport]: if isinstance(collector, File): self.resume_global_capture() - outcome = yield - self.suspend_global_capture() + try: + rep = yield + finally: + self.suspend_global_capture() out, err = self.read_global_capture() - rep = outcome.get_result() if out: rep.sections.append(("Captured stdout", out)) if err: rep.sections.append(("Captured stderr", err)) else: - yield + rep = yield + return rep - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]: with self.item_capture("setup", item): - yield + return (yield) - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]: with self.item_capture("call", item): - yield + return (yield) - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]: with self.item_capture("teardown", item): - yield + return (yield) @hookimpl(tryfirst=True) def pytest_keyboard_interrupt(self) -> None: @@ -980,7 +987,6 @@ def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. Example: - .. code-block:: python def test_output(capsys): @@ -1008,7 +1014,6 @@ def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. Example: - .. code-block:: python def test_output(capsysbinary): @@ -1036,7 +1041,6 @@ def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. Example: - .. code-block:: python def test_system_echo(capfd): @@ -1064,7 +1068,6 @@ def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, N Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. Example: - .. 
code-block:: python def test_system_echo(capfdbinary): diff --git a/contrib/python/pytest/py3/_pytest/compat.py b/contrib/python/pytest/py3/_pytest/compat.py index 6fdb5a1d8eb..83c598c816f 100644 --- a/contrib/python/pytest/py3/_pytest/compat.py +++ b/contrib/python/pytest/py3/_pytest/compat.py @@ -1,37 +1,24 @@ """Python version compatibility code.""" + from __future__ import annotations import dataclasses import enum import functools import inspect -import os -import sys from inspect import Parameter from inspect import signature +import os from pathlib import Path +import sys from typing import Any from typing import Callable -from typing import Generic +from typing import Final from typing import NoReturn -from typing import TYPE_CHECKING from typing import TypeVar import _pytest._py.path as py_path -# fmt: off -# Workaround for https://github.com/sphinx-doc/sphinx/issues/10351. -# If `overload` is imported from `compat` instead of from `typing`, -# Sphinx doesn't recognize it as `overload` and the API docs for -# overloaded functions look good again. But type checkers handle -# it fine. -# fmt: on -if True: - from typing import overload as overload - -if TYPE_CHECKING: - from typing_extensions import Final - _T = TypeVar("_T") _S = TypeVar("_S") @@ -58,17 +45,6 @@ class NotSetType(enum.Enum): NOTSET: Final = NotSetType.token # noqa: E305 # fmt: on -if sys.version_info >= (3, 8): - import importlib.metadata - - importlib_metadata = importlib.metadata -else: - import importlib_metadata as importlib_metadata # noqa: F401 - - -def _format_args(func: Callable[..., Any]) -> str: - return str(signature(func)) - def is_generator(func: object) -> bool: genfunc = inspect.isgeneratorfunction(func) @@ -93,7 +69,7 @@ def is_async_function(func: object) -> bool: return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) -def getlocation(function, curdir: str | None = None) -> str: +def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: function = get_real_func(function) fn = Path(inspect.getfile(function)) lineno = function.__code__.co_firstlineno @@ -127,7 +103,7 @@ def num_mock_patch_args(function) -> int: def getfuncargnames( - function: Callable[..., Any], + function: Callable[..., object], *, name: str = "", is_method: bool = False, @@ -283,9 +259,7 @@ def get_real_func(obj): from _pytest._io.saferepr import saferepr raise ValueError( - ("could not find real function of {start}\nstopped at {current}").format( - start=saferepr(start_obj), current=saferepr(obj) - ) + f"could not find real function of {saferepr(start_obj)}\nstopped at {saferepr(obj)}" ) if isinstance(obj, functools.partial): obj = obj.func @@ -338,47 +312,6 @@ def safe_isclass(obj: object) -> bool: return False -if TYPE_CHECKING: - if sys.version_info >= (3, 8): - from typing import final as final - else: - from typing_extensions import final as final -elif sys.version_info >= (3, 8): - from typing import final as final -else: - - def final(f): - return f - - -if sys.version_info >= (3, 8): - from functools import cached_property as cached_property -else: - - class cached_property(Generic[_S, _T]): - __slots__ = ("func", "__doc__") - - def __init__(self, func: Callable[[_S], _T]) -> None: - self.func = func - self.__doc__ = func.__doc__ - - @overload - def __get__( - self, instance: None, owner: type[_S] | None = ... - ) -> cached_property[_S, _T]: - ... - - @overload - def __get__(self, instance: _S, owner: type[_S] | None = ...) -> _T: - ... 
- - def __get__(self, instance, owner=None): - if instance is None: - return self - value = instance.__dict__[self.func.__name__] = self.func(instance) - return value - - def get_user_id() -> int | None: """Return the current process's real user id or None if it could not be determined. diff --git a/contrib/python/pytest/py3/_pytest/config/__init__.py b/contrib/python/pytest/py3/_pytest/config/__init__.py index e3990d175df..d0afa356324 100644 --- a/contrib/python/pytest/py3/_pytest/config/__init__.py +++ b/contrib/python/pytest/py3/_pytest/config/__init__.py @@ -1,26 +1,29 @@ """Command line options, ini-file and conftest.py processing.""" + import argparse import collections.abc import copy import dataclasses import enum +from functools import lru_cache import glob +import importlib.metadata import inspect import os +from pathlib import Path import re import shlex import sys -import types -import warnings -from functools import lru_cache -from pathlib import Path from textwrap import dedent +import types from types import FunctionType from types import TracebackType from typing import Any from typing import Callable from typing import cast from typing import Dict +from typing import Final +from typing import final from typing import Generator from typing import IO from typing import Iterable @@ -34,22 +37,25 @@ from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union +import warnings +import pluggy from pluggy import HookimplMarker +from pluggy import HookimplOpts from pluggy import HookspecMarker +from pluggy import HookspecOpts from pluggy import PluginManager -import _pytest._code -import _pytest.deprecated -import _pytest.hookspec +from .compat import PathAwareHookProxy from .exceptions import PrintHelp as PrintHelp from .exceptions import UsageError as UsageError from .findpaths import determine_setup +import _pytest._code from _pytest._code import ExceptionInfo from _pytest._code import filter_traceback from _pytest._io import TerminalWriter -from _pytest.compat import final -from _pytest.compat import importlib_metadata # type: ignore[attr-defined] +import _pytest.deprecated +import _pytest.hookspec from _pytest.outcomes import fail from _pytest.outcomes import Skipped from _pytest.pathlib import absolutepath @@ -62,10 +68,12 @@ from _pytest.stash import Stash from _pytest.warning_types import PytestConfigWarning from _pytest.warning_types import warn_explicit_for + if TYPE_CHECKING: + from .argparsing import Argument + from .argparsing import Parser from _pytest._code.code import _TracebackStyle from _pytest.terminal import TerminalReporter - from .argparsing import Argument _PluggyPlugin = object @@ -116,9 +124,7 @@ class ConftestImportFailure(Exception): self.excinfo = excinfo def __str__(self) -> str: - return "{}: {} (from {})".format( - self.excinfo[0].__name__, self.excinfo[1], self.path - ) + return f"{self.excinfo[0].__name__}: {self.excinfo[1]} (from {self.path})" def filter_traceback_for_conftest_import_failure( @@ -260,7 +266,8 @@ default_plugins = essential_plugins + ( "logging", "reports", "python_path", - *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), + "unraisableexception", + "threadexception", "faulthandler", ) @@ -353,9 +360,9 @@ def _get_legacy_hook_marks( if TYPE_CHECKING: # abuse typeguard from importlib to avoid massive method type union thats lacking a alias assert inspect.isroutine(method) - known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} - 
must_warn: list[str] = [] - opts: dict[str, bool] = {} + known_marks: Set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: List[str] = [] + opts: Dict[str, bool] = {} for opt_name in opt_names: opt_attr = getattr(method, opt_name, AttributeError) if opt_attr is not AttributeError: @@ -409,8 +416,6 @@ class PytestPluginManager(PluginManager): # session (#9478), often with the same path, so cache it. self._get_directory = lru_cache(256)(_get_directory) - self._duplicatepaths: Set[Path] = set() - # plugins that were explicitly skipped with pytest.skip # list of (module name, skip reason) # previously we would issue a warning when a plugin was skipped, but @@ -440,7 +445,10 @@ class PytestPluginManager(PluginManager): # Used to know when we are importing conftests after the pytest_configure stage. self._configured = False - def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): + def parse_hookimpl_opts( + self, plugin: _PluggyPlugin, name: str + ) -> Optional[HookimplOpts]: + """:meta private:""" # pytest hooks are always prefixed with "pytest_", # so we avoid accessing possibly non-readable attributes # (see issue #1073). @@ -463,7 +471,8 @@ class PytestPluginManager(PluginManager): method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") ) - def parse_hookspec_opts(self, module_or_class, name: str): + def parse_hookspec_opts(self, module_or_class, name: str) -> Optional[HookspecOpts]: + """:meta private:""" opts = super().parse_hookspec_opts(module_or_class, name) if opts is None: method = getattr(module_or_class, name) @@ -488,15 +497,19 @@ class PytestPluginManager(PluginManager): ) ) return None - ret: Optional[str] = super().register(plugin, name) - if ret: + plugin_name = super().register(plugin, name) + if plugin_name is not None: self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self) + kwargs=dict( + plugin=plugin, + plugin_name=plugin_name, + manager=self, + ) ) if isinstance(plugin, types.ModuleType): self.consider_module(plugin) - return ret + return plugin_name def getplugin(self, name: str): # Support deprecated naming because plugins (xdist e.g.) use it. @@ -577,26 +590,25 @@ class PytestPluginManager(PluginManager): def _try_load_conftest( self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path ) -> None: - self._getconftestmodules(anchor, importmode, rootpath) + self._loadconftestmodules(anchor, importmode, rootpath) # let's also consider test* subdirs if anchor.is_dir(): for x in anchor.glob("test*"): if x.is_dir(): - self._getconftestmodules(x, importmode, rootpath) + self._loadconftestmodules(x, importmode, rootpath) - def _getconftestmodules( + def _loadconftestmodules( self, path: Path, importmode: Union[str, ImportMode], rootpath: Path - ) -> Sequence[types.ModuleType]: + ) -> None: if self._noconftest: - return [] + return directory = self._get_directory(path) # Optimization: avoid repeated searches in the same directory. # Assumes always called with same importmode and rootpath. 
- existing_clist = self._dirpath2confmods.get(directory) - if existing_clist is not None: - return existing_clist + if directory in self._dirpath2confmods: + return # XXX these days we may rather want to use config.rootpath # and allow users to opt into looking into the rootdir parent @@ -609,16 +621,17 @@ class PytestPluginManager(PluginManager): mod = self._importconftest(conftestpath, importmode, rootpath) clist.append(mod) self._dirpath2confmods[directory] = clist - return clist + + def _getconftestmodules(self, path: Path) -> Sequence[types.ModuleType]: + directory = self._get_directory(path) + return self._dirpath2confmods.get(directory, ()) def _rget_with_confmod( self, name: str, path: Path, - importmode: Union[str, ImportMode], - rootpath: Path, ) -> Tuple[types.ModuleType, Any]: - modules = self._getconftestmodules(path, importmode, rootpath=rootpath) + modules = self._getconftestmodules(path) for mod in reversed(modules): try: return mod, getattr(mod, name) @@ -629,7 +642,8 @@ class PytestPluginManager(PluginManager): def _importconftest( self, conftestpath: Path, importmode: Union[str, ImportMode], rootpath: Path ) -> types.ModuleType: - existing = self.get_plugin(str(conftestpath)) + conftestpath_plugin_name = str(conftestpath) + existing = self.get_plugin(conftestpath_plugin_name) if existing is not None: return cast(types.ModuleType, existing) @@ -651,10 +665,15 @@ class PytestPluginManager(PluginManager): if dirpath in self._dirpath2confmods: for path, mods in self._dirpath2confmods.items(): if dirpath in path.parents or path == dirpath: - assert mod not in mods + if mod in mods: + raise AssertionError( + f"While trying to load conftest path {str(conftestpath)}, " + f"found that the module {mod} is already loaded with path {mod.__file__}. " + "This is not supposed to happen. Please report this issue to pytest." + ) mods.append(mod) self.trace(f"loading conftestmodule {mod!r}") - self.consider_conftest(mod) + self.consider_conftest(mod, registration_name=conftestpath_plugin_name) return mod def _check_non_top_pytest_plugins( @@ -734,9 +753,11 @@ class PytestPluginManager(PluginManager): del self._name2plugin["pytest_" + name] self.import_plugin(arg, consider_entry_points=True) - def consider_conftest(self, conftestmodule: types.ModuleType) -> None: + def consider_conftest( + self, conftestmodule: types.ModuleType, registration_name: str + ) -> None: """:meta private:""" - self.register(conftestmodule, name=conftestmodule.__file__) + self.register(conftestmodule, name=registration_name) def consider_env(self) -> None: """:meta private:""" @@ -792,7 +813,7 @@ class PytestPluginManager(PluginManager): def _get_plugin_specs_as_list( - specs: Union[None, types.ModuleType, str, Sequence[str]] + specs: Union[None, types.ModuleType, str, Sequence[str]], ) -> List[str]: """Parse a plugins specification into a list of plugin names.""" # None means empty. @@ -949,7 +970,8 @@ class Config: #: Command line arguments. ARGS = enum.auto() #: Invocation directory. - INCOVATION_DIR = enum.auto() + INVOCATION_DIR = enum.auto() + INCOVATION_DIR = INVOCATION_DIR # backwards compatibility alias #: 'testpaths' configuration value. TESTPATHS = enum.auto() @@ -959,7 +981,8 @@ class Config: *, invocation_params: Optional[InvocationParams] = None, ) -> None: - from .argparsing import Parser, FILE_OR_DIR + from .argparsing import FILE_OR_DIR + from .argparsing import Parser if invocation_params is None: invocation_params = self.InvocationParams( @@ -999,10 +1022,8 @@ class Config: # Deprecated alias. 
Was never public. Can be removed in a few releases. self._store = self.stash - from .compat import PathAwareHookProxy - self.trace = self.pluginmanager.trace.root.get("config") - self.hook = PathAwareHookProxy(self.pluginmanager.hook) + self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment] self._inicache: Dict[str, Any] = {} self._override_ini: Sequence[str] = () self._opt2dest: Dict[str, str] = {} @@ -1167,7 +1188,7 @@ class Config: ns.inifilename, ns.file_or_dir + unknown_args, rootdir_cmd_arg=ns.rootdir or None, - config=self, + invocation_dir=self.invocation_params.dir, ) self._rootpath = rootpath self._inipath = inipath @@ -1216,7 +1237,7 @@ class Config: package_files = ( str(file) - for dist in importlib_metadata.distributions() + for dist in importlib.metadata.distributions() if any(ep.group == "pytest11" for ep in dist.entry_points) for file in dist.files or [] ) @@ -1240,7 +1261,7 @@ class Config: self, *, args: List[str], - pyargs: List[str], + pyargs: bool, testpaths: List[str], invocation_dir: Path, rootpath: Path, @@ -1275,7 +1296,7 @@ class Config: else: result = [] if not result: - source = Config.ArgsSource.INCOVATION_DIR + source = Config.ArgsSource.INVOCATION_DIR result = [str(invocation_dir)] return result, source @@ -1338,12 +1359,14 @@ class Config: else: raise - @hookimpl(hookwrapper=True) - def pytest_collection(self) -> Generator[None, None, None]: + @hookimpl(wrapper=True) + def pytest_collection(self) -> Generator[None, object, object]: # Validate invalid ini keys after collection is done so we take in account # options added by late-loading conftest files. - yield - self._validate_config_options() + try: + return (yield) + finally: + self._validate_config_options() def _checkversion(self) -> None: import pytest @@ -1378,8 +1401,9 @@ class Config: return # Imported lazily to improve start-up time. + from packaging.requirements import InvalidRequirement + from packaging.requirements import Requirement from packaging.version import Version - from packaging.requirements import InvalidRequirement, Requirement plugin_info = self.pluginmanager.list_plugin_distinfo() plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info} @@ -1445,7 +1469,7 @@ class Config: """Issue and handle a warning during the "configure" stage. During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item`` - function because it is not possible to have hookwrappers around ``pytest_configure``. + function because it is not possible to have hook wrappers around ``pytest_configure``. This function is mainly intended for plugins that need to issue warnings during ``pytest_configure`` (or similar stages). @@ -1487,6 +1511,27 @@ class Config: def getini(self, name: str): """Return configuration value from an :ref:`ini file <configfiles>`. + If a configuration value is not defined in an + :ref:`ini file <configfiles>`, then the ``default`` value provided while + registering the configuration through + :func:`parser.addini <pytest.Parser.addini>` will be returned. + Please note that you can even provide ``None`` as a valid + default value. + + If ``default`` is not provided while registering using + :func:`parser.addini <pytest.Parser.addini>`, then a default value + based on the ``type`` parameter passed to + :func:`parser.addini <pytest.Parser.addini>` will be returned. 
+ The default values based on ``type`` are: + ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]`` + ``bool`` : ``False`` + ``string`` : empty string ``""`` + + If neither the ``default`` nor the ``type`` parameter is passed + while registering the configuration through + :func:`parser.addini <pytest.Parser.addini>`, then the configuration + is treated as a string and a default empty string '' is returned. + If the specified name hasn't been registered through a prior :func:`parser.addini <pytest.Parser.addini>` call (usually from a plugin), a ValueError is raised. @@ -1513,11 +1558,7 @@ class Config: try: value = self.inicfg[name] except KeyError: - if default is not None: - return default - if type is None: - return "" - return [] + return default else: value = override_value # Coerce the values based on types. @@ -1557,13 +1598,9 @@ class Config: else: return self._getini_unknown_type(name, type, value) - def _getconftest_pathlist( - self, name: str, path: Path, rootpath: Path - ) -> Optional[List[Path]]: + def _getconftest_pathlist(self, name: str, path: Path) -> Optional[List[Path]]: try: - mod, relroots = self.pluginmanager._rget_with_confmod( - name, path, self.getoption("importmode"), rootpath - ) + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) except KeyError: return None assert mod.__file__ is not None @@ -1588,9 +1625,7 @@ class Config: key, user_ini_value = ini_config.split("=", 1) except ValueError as e: raise UsageError( - "-o/--override-ini expects option=value style (got: {!r}).".format( - ini_config - ) + f"-o/--override-ini expects option=value style (got: {ini_config!r})." ) from e else: if key == name: @@ -1629,6 +1664,77 @@ class Config: """Deprecated, use getoption(skip=True) instead.""" return self.getoption(name, skip=True) + #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`). + VERBOSITY_ASSERTIONS: Final = "assertions" + _VERBOSITY_INI_DEFAULT: Final = "auto" + + def get_verbosity(self, verbosity_type: Optional[str] = None) -> int: + r"""Retrieve the verbosity level for a fine-grained verbosity type. + + :param verbosity_type: Verbosity type to get level for. If a level is + configured for the given type, that value will be returned. If the + given type is not a known verbosity type, the global verbosity + level will be returned. If the given type is None (default), the + global verbosity level will be returned. + + To configure a level for a fine-grained verbosity type, the + configuration file should have a setting for the configuration name + and a numeric value for the verbosity level. A special value of "auto" + can be used to explicitly use the global verbosity level. + + Example: + .. code-block:: ini + + # content of pytest.ini + [pytest] + verbosity_assertions = 2 + + .. code-block:: console + + pytest -v + + .. 
code-block:: python + + print(config.get_verbosity()) # 1 + print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS)) # 2 + """ + global_level = self.option.verbose + assert isinstance(global_level, int) + if verbosity_type is None: + return global_level + + ini_name = Config._verbosity_ini_name(verbosity_type) + if ini_name not in self._parser._inidict: + return global_level + + level = self.getini(ini_name) + if level == Config._VERBOSITY_INI_DEFAULT: + return global_level + + return int(level) + + @staticmethod + def _verbosity_ini_name(verbosity_type: str) -> str: + return f"verbosity_{verbosity_type}" + + @staticmethod + def _add_verbosity_ini(parser: "Parser", verbosity_type: str, help: str) -> None: + """Add a output verbosity configuration option for the given output type. + + :param parser: Parser for command line arguments and ini-file values. + :param verbosity_type: Fine-grained verbosity category. + :param help: Description of the output this type controls. + + The value should be retrieved via a call to + :py:func:`config.get_verbosity(type) <pytest.Config.get_verbosity>`. + """ + parser.addini( + Config._verbosity_ini_name(verbosity_type), + help=help, + type="string", + default=Config._VERBOSITY_INI_DEFAULT, + ) + def _warn_about_missing_assertion(self, mode: str) -> None: if not _assertion_supported(): if mode == "plain": diff --git a/contrib/python/pytest/py3/_pytest/config/argparsing.py b/contrib/python/pytest/py3/_pytest/config/argparsing.py index d3f01916b61..0f91dc0fe91 100644 --- a/contrib/python/pytest/py3/_pytest/config/argparsing.py +++ b/contrib/python/pytest/py3/_pytest/config/argparsing.py @@ -1,35 +1,41 @@ import argparse +from gettext import gettext import os import sys -import warnings -from gettext import gettext from typing import Any from typing import Callable from typing import cast from typing import Dict +from typing import final from typing import List +from typing import Literal from typing import Mapping from typing import NoReturn from typing import Optional from typing import Sequence from typing import Tuple -from typing import TYPE_CHECKING from typing import Union +import warnings import _pytest._io -from _pytest.compat import final from _pytest.config.exceptions import UsageError from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT from _pytest.deprecated import ARGUMENT_TYPE_STR from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE from _pytest.deprecated import check_ispytest -if TYPE_CHECKING: - from typing_extensions import Literal FILE_OR_DIR = "file_or_dir" +class NotSet: + def __repr__(self) -> str: + return "<notset>" + + +NOT_SET = NotSet() + + @final class Parser: """Parser for command line arguments and ini-file values. @@ -93,7 +99,7 @@ class Parser: :param opts: Option names, can be short or long options. :param attrs: - Same attributes as the argparse library's :py:func:`add_argument() + Same attributes as the argparse library's :meth:`add_argument() <argparse.ArgumentParser.add_argument>` function accepts. After command line parsing, options are available on the pytest config @@ -177,9 +183,9 @@ class Parser: name: str, help: str, type: Optional[ - "Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']" + Literal["string", "paths", "pathlist", "args", "linelist", "bool"] ] = None, - default: Any = None, + default: Any = NOT_SET, ) -> None: """Register an ini-file option. @@ -206,10 +212,30 @@ class Parser: :py:func:`config.getini(name) <pytest.Config.getini>`. 
""" assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool") + if default is NOT_SET: + default = get_ini_default_for_type(type) + self._inidict[name] = (help, type, default) self._ininames.append(name) +def get_ini_default_for_type( + type: Optional[Literal["string", "paths", "pathlist", "args", "linelist", "bool"]], +) -> Any: + """ + Used by addini to get the default value for a given ini-option type, when + default is not supplied. + """ + if type is None: + return "" + elif type in ("paths", "pathlist", "args", "linelist"): + return [] + elif type == "bool": + return False + else: + return "" + + class ArgumentError(Exception): """Raised if an Argument instance is created with invalid or inconsistent arguments.""" @@ -375,7 +401,7 @@ class OptionGroup: :param opts: Option names, can be short or long options. :param attrs: - Same attributes as the argparse library's :py:func:`add_argument() + Same attributes as the argparse library's :meth:`add_argument() <argparse.ArgumentParser.add_argument>` function accepts. """ conflict = set(opts).intersection( @@ -455,7 +481,7 @@ class MyOptionParser(argparse.ArgumentParser): ) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]: if not arg_string: return None - if not arg_string[0] in self.prefix_chars: + if arg_string[0] not in self.prefix_chars: return None if arg_string in self._option_string_actions: action = self._option_string_actions[arg_string] diff --git a/contrib/python/pytest/py3/_pytest/config/compat.py b/contrib/python/pytest/py3/_pytest/config/compat.py index 5bd922a4a87..65e46c3679a 100644 --- a/contrib/python/pytest/py3/_pytest/config/compat.py +++ b/contrib/python/pytest/py3/_pytest/config/compat.py @@ -1,15 +1,19 @@ +from __future__ import annotations + import functools -import warnings from pathlib import Path -from typing import Optional +from typing import Mapping +import warnings + +import pluggy from ..compat import LEGACY_PATH from ..compat import legacy_path from ..deprecated import HOOK_LEGACY_PATH_ARG -from _pytest.nodes import _check_path + # hookname: (Path, LEGACY_PATH) -imply_paths_hooks = { +imply_paths_hooks: Mapping[str, tuple[str, str]] = { "pytest_ignore_collect": ("collection_path", "path"), "pytest_collect_file": ("file_path", "path"), "pytest_pycollect_makemodule": ("module_path", "path"), @@ -18,6 +22,14 @@ imply_paths_hooks = { } +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + class PathAwareHookProxy: """ this helper wraps around hook callers @@ -27,24 +39,24 @@ class PathAwareHookProxy: this may have to be changed later depending on bugs """ - def __init__(self, hook_caller): - self.__hook_caller = hook_caller + def __init__(self, hook_relay: pluggy.HookRelay) -> None: + self._hook_relay = hook_relay - def __dir__(self): - return dir(self.__hook_caller) + def __dir__(self) -> list[str]: + return dir(self._hook_relay) - def __getattr__(self, key, _wraps=functools.wraps): - hook = getattr(self.__hook_caller, key) + def __getattr__(self, key: str) -> pluggy.HookCaller: + hook: pluggy.HookCaller = getattr(self._hook_relay, key) if key not in imply_paths_hooks: self.__dict__[key] = hook return hook else: path_var, fspath_var = imply_paths_hooks[key] - @_wraps(hook) + @functools.wraps(hook) def fixed_hook(**kw): - path_value: Optional[Path] = kw.pop(path_var, None) - fspath_value: Optional[LEGACY_PATH] = 
kw.pop(fspath_var, None) + path_value: Path | None = kw.pop(path_var, None) + fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None) if fspath_value is not None: warnings.warn( HOOK_LEGACY_PATH_ARG.format( @@ -65,6 +77,8 @@ class PathAwareHookProxy: kw[fspath_var] = fspath_value return hook(**kw) + fixed_hook.name = hook.name # type: ignore[attr-defined] + fixed_hook.spec = hook.spec # type: ignore[attr-defined] fixed_hook.__name__ = key self.__dict__[key] = fixed_hook - return fixed_hook + return fixed_hook # type: ignore[return-value] diff --git a/contrib/python/pytest/py3/_pytest/config/exceptions.py b/contrib/python/pytest/py3/_pytest/config/exceptions.py index 4f1320e758d..4031ea732f3 100644 --- a/contrib/python/pytest/py3/_pytest/config/exceptions.py +++ b/contrib/python/pytest/py3/_pytest/config/exceptions.py @@ -1,4 +1,4 @@ -from _pytest.compat import final +from typing import final @final diff --git a/contrib/python/pytest/py3/_pytest/config/findpaths.py b/contrib/python/pytest/py3/_pytest/config/findpaths.py index 02674ffae3b..0151014c4f4 100644 --- a/contrib/python/pytest/py3/_pytest/config/findpaths.py +++ b/contrib/python/pytest/py3/_pytest/config/findpaths.py @@ -1,13 +1,12 @@ import os -import sys from pathlib import Path +import sys from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Sequence from typing import Tuple -from typing import TYPE_CHECKING from typing import Union import iniconfig @@ -18,9 +17,6 @@ from _pytest.pathlib import absolutepath from _pytest.pathlib import commonpath from _pytest.pathlib import safe_exists -if TYPE_CHECKING: - from . import Config - def _parse_ini_config(path: Path) -> iniconfig.IniConfig: """Parse the given generic '.ini' file using legacy IniConfig parser, returning @@ -41,7 +37,6 @@ def load_config_dict_from_file( Return None if the file does not contain valid pytest configuration. """ - # Configuration from ini files are obtained from the [pytest] section, if present. if filepath.suffix == ".ini": iniconfig = _parse_ini_config(filepath) @@ -169,8 +164,21 @@ def determine_setup( inifile: Optional[str], args: Sequence[str], rootdir_cmd_arg: Optional[str] = None, - config: Optional["Config"] = None, + invocation_dir: Optional[Path] = None, ) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]: + """Determine the rootdir, inifile and ini configuration values from the + command line arguments. + + :param inifile: + The `--inifile` command line argument, if given. + :param args: + The free command line arguments. + :param rootdir_cmd_arg: + The `--rootdir` command line argument, if given. + :param invocation_dir: + The working directory when pytest was invoked, if known. + If not known, the current working directory is used. + """ rootdir = None dirs = get_dirs_from_args(args) if inifile: @@ -191,8 +199,8 @@ def determine_setup( if dirs != [ancestor]: rootdir, inipath, inicfg = locate_config(dirs) if rootdir is None: - if config is not None: - cwd = config.invocation_params.dir + if invocation_dir is not None: + cwd = invocation_dir else: cwd = Path.cwd() rootdir = get_common_ancestor([cwd, ancestor]) @@ -202,9 +210,7 @@ def determine_setup( rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) if not rootdir.is_dir(): raise UsageError( - "Directory '{}' not found. Check your '--rootdir' option.".format( - rootdir - ) + f"Directory '{rootdir}' not found. Check your '--rootdir' option." 
) assert rootdir is not None return rootdir, inipath, inicfg or {} diff --git a/contrib/python/pytest/py3/_pytest/debugging.py b/contrib/python/pytest/py3/_pytest/debugging.py index 21c8bcf3b07..52a39d2fe47 100644 --- a/contrib/python/pytest/py3/_pytest/debugging.py +++ b/contrib/python/pytest/py3/_pytest/debugging.py @@ -1,10 +1,10 @@ """Interactive debugging with PDB, the Python Debugger.""" + import argparse import functools import os import sys import types -import unittest from typing import Any from typing import Callable from typing import Generator @@ -14,6 +14,7 @@ from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union +import unittest from _pytest import outcomes from _pytest._code import ExceptionInfo @@ -26,6 +27,7 @@ from _pytest.config.exceptions import UsageError from _pytest.nodes import Node from _pytest.reports import BaseReport + if TYPE_CHECKING: from _pytest.capture import CaptureManager from _pytest.runner import CallInfo @@ -300,8 +302,7 @@ class pytestPDB: elif capturing: tw.sep( ">", - "PDB %s (IO-capturing turned off for %s)" - % (method, capturing), + f"PDB {method} (IO-capturing turned off for {capturing})", ) else: tw.sep(">", f"PDB {method}") @@ -343,10 +344,10 @@ class PdbInvoke: class PdbTrace: - @hookimpl(hookwrapper=True) - def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]: + @hookimpl(wrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, object, object]: wrap_pytest_function_for_tracing(pyfuncitem) - yield + return (yield) def wrap_pytest_function_for_tracing(pyfuncitem): diff --git a/contrib/python/pytest/py3/_pytest/deprecated.py b/contrib/python/pytest/py3/_pytest/deprecated.py index b9c10df7a00..76170c8c0e8 100644 --- a/contrib/python/pytest/py3/_pytest/deprecated.py +++ b/contrib/python/pytest/py3/_pytest/deprecated.py @@ -8,12 +8,15 @@ All constants defined in this module should be either instances of :class:`PytestWarning`, or :class:`UnformattedWarning` in case of warnings which need to format their messages. """ + from warnings import warn from _pytest.warning_types import PytestDeprecationWarning from _pytest.warning_types import PytestRemovedIn8Warning +from _pytest.warning_types import PytestRemovedIn9Warning from _pytest.warning_types import UnformattedWarning + # set of plugins which have been integrated into the core; we use this list to ignore # them during registration to avoid conflicts DEPRECATED_EXTERNAL_PLUGINS = { @@ -122,6 +125,11 @@ HOOK_LEGACY_MARKING = UnformattedWarning( "#configuring-hook-specs-impls-using-markers", ) +MARKED_FIXTURE = PytestRemovedIn9Warning( + "Marks applied to fixtures have no effect\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" +) + # You want to make some `__init__` or function "private". 
# # def my_private_function(some, args): diff --git a/contrib/python/pytest/py3/_pytest/doctest.py b/contrib/python/pytest/py3/_pytest/doctest.py index ca41a98ea9c..c6b1a9df5cc 100644 --- a/contrib/python/pytest/py3/_pytest/doctest.py +++ b/contrib/python/pytest/py3/_pytest/doctest.py @@ -1,15 +1,15 @@ """Discover and run doctests in modules and test files.""" + import bdb +from contextlib import contextmanager import functools import inspect import os +from pathlib import Path import platform import sys import traceback import types -import warnings -from contextlib import contextmanager -from pathlib import Path from typing import Any from typing import Callable from typing import Dict @@ -23,6 +23,7 @@ from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union +import warnings from _pytest import outcomes from _pytest._code.code import ExceptionInfo @@ -33,17 +34,17 @@ from _pytest.compat import safe_getattr from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.fixtures import fixture -from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import TopRequest from _pytest.nodes import Collector from _pytest.nodes import Item from _pytest.outcomes import OutcomeException from _pytest.outcomes import skip from _pytest.pathlib import fnmatch_ex -from _pytest.pathlib import import_path from _pytest.python import Module from _pytest.python_api import approx from _pytest.warning_types import PytestWarning + if TYPE_CHECKING: import doctest @@ -105,7 +106,7 @@ def pytest_addoption(parser: Parser) -> None: "--doctest-ignore-import-errors", action="store_true", default=False, - help="Ignore doctest ImportErrors", + help="Ignore doctest collection errors", dest="doctest_ignore_import_errors", ) group.addoption( @@ -255,14 +256,20 @@ class DoctestItem(Item): self, name: str, parent: "Union[DoctestTextfile, DoctestModule]", - runner: Optional["doctest.DocTestRunner"] = None, - dtest: Optional["doctest.DocTest"] = None, + runner: "doctest.DocTestRunner", + dtest: "doctest.DocTest", ) -> None: super().__init__(name, parent) self.runner = runner self.dtest = dtest + + # Stuff needed for fixture support. 
self.obj = None - self.fixture_request: Optional[FixtureRequest] = None + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() @classmethod def from_parent( # type: ignore @@ -277,19 +284,18 @@ class DoctestItem(Item): """The public named constructor.""" return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + def _initrequest(self) -> None: + self.funcargs: Dict[str, object] = {} + self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] + def setup(self) -> None: - if self.dtest is not None: - self.fixture_request = _setup_fixtures(self) - globs = dict(getfixture=self.fixture_request.getfixturevalue) - for name, value in self.fixture_request.getfixturevalue( - "doctest_namespace" - ).items(): - globs[name] = value - self.dtest.globs.update(globs) + self._request._fillfixtures() + globs = dict(getfixture=self._request.getfixturevalue) + for name, value in self._request.getfixturevalue("doctest_namespace").items(): + globs[name] = value + self.dtest.globs.update(globs) def runtest(self) -> None: - assert self.dtest is not None - assert self.runner is not None _check_all_skipped(self.dtest) self._disable_output_capturing_for_darwin() failures: List["doctest.DocTestFailure"] = [] @@ -376,7 +382,6 @@ class DoctestItem(Item): return ReprFailDoctest(reprlocation_lines) def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: - assert self.dtest is not None return self.path, self.dtest.lineno, "[doctest] %s" % self.name @@ -396,8 +401,8 @@ def _get_flag_lookup() -> Dict[str, int]: ) -def get_optionflags(parent): - optionflags_str = parent.config.getini("doctest_optionflags") +def get_optionflags(config: Config) -> int: + optionflags_str = config.getini("doctest_optionflags") flag_lookup_table = _get_flag_lookup() flag_acc = 0 for flag in optionflags_str: @@ -405,8 +410,8 @@ def get_optionflags(parent): return flag_acc -def _get_continue_on_failure(config): - continue_on_failure = config.getvalue("doctest_continue_on_failure") +def _get_continue_on_failure(config: Config) -> bool: + continue_on_failure: bool = config.getvalue("doctest_continue_on_failure") if continue_on_failure: # We need to turn off this if we use pdb since we should stop at # the first failure. @@ -429,7 +434,7 @@ class DoctestTextfile(Module): name = self.path.name globs = {"__name__": "__main__"} - optionflags = get_optionflags(self) + optionflags = get_optionflags(self.config) runner = _get_runner( verbose=False, @@ -481,9 +486,9 @@ def _patch_unwrap_mock_aware() -> Generator[None, None, None]: return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) except Exception as e: warnings.warn( - "Got %r when unwrapping %r. This is usually caused " + f"Got {e!r} when unwrapping {func!r}. This is usually caused " "by a violation of Python's object protocol; see e.g. " - "https://github.com/pytest-dev/pytest/issues/5080" % (e, func), + "https://github.com/pytest-dev/pytest/issues/5080", PytestWarning, ) raise @@ -545,9 +550,7 @@ class DoctestModule(Module): Here we override `_from_module` to check the underlying function instead. https://github.com/python/cpython/issues/107995 """ - if hasattr(functools, "cached_property") and isinstance( - object, functools.cached_property - ): + if isinstance(object, functools.cached_property): object = object.func # Type ignored because this is a private function. 
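For context on the `setup()` rewrite above: doctest items keep exposing a `getfixture` helper plus everything placed into the `doctest_namespace` fixture as globals of every collected doctest. A minimal sketch of that user-facing behaviour, assuming standard `--doctest-modules` collection (the `add_math` fixture name and the example module are illustrative, not part of this diff):

    # conftest.py
    import math
    import pytest

    @pytest.fixture(autouse=True)
    def add_math(doctest_namespace):
        # Make ``math`` available as a global in all collected doctests.
        doctest_namespace["math"] = math

    # example.py -- collected via ``pytest --doctest-modules``
    def halve(x):
        """
        >>> tmp = getfixture('tmp_path')  # dynamic fixture access inside a doctest
        >>> math.floor(halve(5))
        2
        """
        return x / 2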
@@ -564,19 +567,20 @@ class DoctestModule(Module): ) else: try: - module = import_path( - self.path, - root=self.config.rootpath, - mode=self.config.getoption("importmode"), - ) - except ImportError: + module = self.obj + except Collector.CollectError: if self.config.getvalue("doctest_ignore_import_errors"): skip("unable to import module %r" % self.path) else: raise + + # While doctests currently don't support fixtures directly, we still + # need to pick up autouse fixtures. + self.session._fixturemanager.parsefactories(self) + # Uses internal doctest module parsing mechanism. finder = MockAwareDocTestFinder() - optionflags = get_optionflags(self) + optionflags = get_optionflags(self.config) runner = _get_runner( verbose=False, optionflags=optionflags, @@ -591,22 +595,6 @@ class DoctestModule(Module): ) -def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest: - """Used by DoctestTextfile and DoctestItem to setup fixture information.""" - - def func() -> None: - pass - - doctest_item.funcargs = {} # type: ignore[attr-defined] - fm = doctest_item.session._fixturemanager - doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined] - node=doctest_item, func=func, cls=None, funcargs=False - ) - fixture_request = FixtureRequest(doctest_item, _ispytest=True) - fixture_request._fillfixtures() - return fixture_request - - def _init_checker_class() -> Type["doctest.OutputChecker"]: import doctest import re diff --git a/contrib/python/pytest/py3/_pytest/faulthandler.py b/contrib/python/pytest/py3/_pytest/faulthandler.py index d8c7e9fd3b6..083bcb83739 100644 --- a/contrib/python/pytest/py3/_pytest/faulthandler.py +++ b/contrib/python/pytest/py3/_pytest/faulthandler.py @@ -2,11 +2,11 @@ import os import sys from typing import Generator -import pytest from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.nodes import Item from _pytest.stash import StashKey +import pytest fault_handler_original_stderr_fd_key = StashKey[int]() @@ -69,8 +69,8 @@ def get_timeout_config_value(config: Config) -> float: return float(config.getini("faulthandler_timeout") or 0.0) -@pytest.hookimpl(hookwrapper=True, trylast=True) -def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: timeout = get_timeout_config_value(item.config) if timeout > 0: import faulthandler @@ -78,11 +78,11 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: stderr = item.config.stash[fault_handler_stderr_fd_key] faulthandler.dump_traceback_later(timeout, file=stderr) try: - yield + return (yield) finally: faulthandler.cancel_dump_traceback_later() else: - yield + return (yield) @pytest.hookimpl(tryfirst=True) diff --git a/contrib/python/pytest/py3/_pytest/fixtures.py b/contrib/python/pytest/py3/_pytest/fixtures.py index 0462504efaf..206fd084ae4 100644 --- a/contrib/python/pytest/py3/_pytest/fixtures.py +++ b/contrib/python/pytest/py3/_pytest/fixtures.py @@ -1,18 +1,19 @@ +import abc +from collections import defaultdict +from collections import deque +from contextlib import suppress import dataclasses import functools import inspect import os -import sys -import warnings -from collections import defaultdict -from collections import deque -from contextlib import suppress from pathlib import Path -from types import TracebackType +from typing import AbstractSet from typing import Any from typing import Callable from typing import 
cast from typing import Dict +from typing import Final +from typing import final from typing import Generator from typing import Generic from typing import Iterable @@ -21,13 +22,14 @@ from typing import List from typing import MutableMapping from typing import NoReturn from typing import Optional +from typing import overload from typing import Sequence from typing import Set from typing import Tuple -from typing import Type from typing import TYPE_CHECKING from typing import TypeVar from typing import Union +import warnings import _pytest from _pytest import nodes @@ -35,10 +37,8 @@ from _pytest._code import getfslineno from _pytest._code.code import FormattedExcinfo from _pytest._code.code import TerminalRepr from _pytest._io import TerminalWriter -from _pytest.compat import _format_args from _pytest.compat import _PytestWrapper from _pytest.compat import assert_never -from _pytest.compat import final from _pytest.compat import get_real_func from _pytest.compat import get_real_method from _pytest.compat import getfuncargnames @@ -47,12 +47,12 @@ from _pytest.compat import getlocation from _pytest.compat import is_generator from _pytest.compat import NOTSET from _pytest.compat import NotSetType -from _pytest.compat import overload from _pytest.compat import safe_getattr from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE from _pytest.deprecated import YIELD_FIXTURE from _pytest.mark import Mark from _pytest.mark import ParameterSet @@ -62,17 +62,17 @@ from _pytest.outcomes import skip from _pytest.outcomes import TEST_OUTCOME from _pytest.pathlib import absolutepath from _pytest.pathlib import bestrelpath +from _pytest.scope import _ScopeName from _pytest.scope import HIGH_SCOPES from _pytest.scope import Scope -from _pytest.stash import StashKey if TYPE_CHECKING: from typing import Deque - from _pytest.scope import _ScopeName from _pytest.main import Session from _pytest.python import CallSpec2 + from _pytest.python import Function from _pytest.python import Metafunc @@ -97,8 +97,8 @@ _FixtureCachedResult = Union[ None, # Cache key. object, - # Exc info if raised. - Tuple[Type[BaseException], BaseException, TracebackType], + # Exception if raised. + BaseException, ], ] @@ -120,9 +120,8 @@ def get_scope_package( from _pytest.python import Package current: Optional[Union[nodes.Item, nodes.Collector]] = node - fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py") while current and ( - not isinstance(current, Package) or fixture_package_name != current.nodeid + not isinstance(current, Package) or current.nodeid != fixturedef.baseid ): current = current.parent # type: ignore[assignment] if current is None: @@ -136,7 +135,9 @@ def get_scope_node( import _pytest.python if scope is Scope.Function: - return node.getparent(nodes.Item) + # Type ignored because this is actually safe, see: + # https://github.com/python/mypy/issues/4717 + return node.getparent(nodes.Item) # type: ignore[type-abstract] elif scope is Scope.Class: return node.getparent(_pytest.python.Class) elif scope is Scope.Module: @@ -149,80 +150,6 @@ def get_scope_node( assert_never(scope) -# Used for storing artificial fixturedefs for direct parametrization. 
-name2pseudofixturedef_key = StashKey[Dict[str, "FixtureDef[Any]"]]() - - -def add_funcarg_pseudo_fixture_def( - collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager" -) -> None: - # This function will transform all collected calls to functions - # if they use direct funcargs (i.e. direct parametrization) - # because we want later test execution to be able to rely on - # an existing FixtureDef structure for all arguments. - # XXX we can probably avoid this algorithm if we modify CallSpec2 - # to directly care for creating the fixturedefs within its methods. - if not metafunc._calls[0].funcargs: - # This function call does not have direct parametrization. - return - # Collect funcargs of all callspecs into a list of values. - arg2params: Dict[str, List[object]] = {} - arg2scope: Dict[str, Scope] = {} - for callspec in metafunc._calls: - for argname, argvalue in callspec.funcargs.items(): - assert argname not in callspec.params - callspec.params[argname] = argvalue - arg2params_list = arg2params.setdefault(argname, []) - callspec.indices[argname] = len(arg2params_list) - arg2params_list.append(argvalue) - if argname not in arg2scope: - scope = callspec._arg2scope.get(argname, Scope.Function) - arg2scope[argname] = scope - callspec.funcargs.clear() - - # Register artificial FixtureDef's so that later at test execution - # time we can rely on a proper FixtureDef to exist for fixture setup. - arg2fixturedefs = metafunc._arg2fixturedefs - for argname, valuelist in arg2params.items(): - # If we have a scope that is higher than function, we need - # to make sure we only ever create an according fixturedef on - # a per-scope basis. We thus store and cache the fixturedef on the - # node related to the scope. - scope = arg2scope[argname] - node = None - if scope is not Scope.Function: - node = get_scope_node(collector, scope) - if node is None: - assert scope is Scope.Class and isinstance( - collector, _pytest.python.Module - ) - # Use module-level collector for class-scope (for now). - node = collector - if node is None: - name2pseudofixturedef = None - else: - default: Dict[str, FixtureDef[Any]] = {} - name2pseudofixturedef = node.stash.setdefault( - name2pseudofixturedef_key, default - ) - if name2pseudofixturedef is not None and argname in name2pseudofixturedef: - arg2fixturedefs[argname] = [name2pseudofixturedef[argname]] - else: - fixturedef = FixtureDef( - fixturemanager=fixturemanager, - baseid="", - argname=argname, - func=get_direct_param_fixture_func, - scope=arg2scope[argname], - params=valuelist, - unittest=False, - ids=None, - ) - arg2fixturedefs[argname] = [fixturedef] - if name2pseudofixturedef is not None: - name2pseudofixturedef[argname] = fixturedef - - def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: """Return fixturemarker or None if it doesn't exist or raised exceptions.""" @@ -232,11 +159,17 @@ def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: ) -# Parametrized fixture key, helper alias for code below. -_Key = Tuple[object, ...] 
+@dataclasses.dataclass(frozen=True) +class FixtureArgKey: + argname: str + param_index: int + scoped_item_path: Optional[Path] + item_cls: Optional[type] -def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_Key]: +def get_parametrized_fixture_keys( + item: nodes.Item, scope: Scope +) -> Iterator[FixtureArgKey]: """Return list of keys for all parametrized arguments which match the specified scope.""" assert scope is not Scope.Function @@ -246,24 +179,28 @@ def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_K pass else: cs: CallSpec2 = callspec - # cs.indices.items() is random order of argnames. Need to + # cs.indices is random order of argnames. Need to # sort this so that different calls to # get_parametrized_fixture_keys will be deterministic. - for argname, param_index in sorted(cs.indices.items()): + for argname in sorted(cs.indices): if cs._arg2scope[argname] != scope: continue + + item_cls = None if scope is Scope.Session: - key: _Key = (argname, param_index) + scoped_item_path = None elif scope is Scope.Package: - key = (argname, param_index, item.path.parent) + scoped_item_path = item.path elif scope is Scope.Module: - key = (argname, param_index, item.path) + scoped_item_path = item.path elif scope is Scope.Class: + scoped_item_path = item.path item_cls = item.cls # type: ignore[attr-defined] - key = (argname, param_index, item.path, item_cls) else: assert_never(scope) - yield key + + param_index = cs.indices[argname] + yield FixtureArgKey(argname, param_index, scoped_item_path, item_cls) # Algorithm for sorting on a per-parametrized resource setup basis. @@ -273,19 +210,17 @@ def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_K def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: - argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]] = {} - items_by_argkey: Dict[Scope, Dict[_Key, Deque[nodes.Item]]] = {} + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[FixtureArgKey, None]]] = {} + items_by_argkey: Dict[Scope, Dict[FixtureArgKey, Deque[nodes.Item]]] = {} for scope in HIGH_SCOPES: - d: Dict[nodes.Item, Dict[_Key, None]] = {} - argkeys_cache[scope] = d - item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque) - items_by_argkey[scope] = item_d + scoped_argkeys_cache = argkeys_cache[scope] = {} + scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(deque) for item in items: keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None) if keys: - d[item] = keys + scoped_argkeys_cache[item] = keys for key in keys: - item_d[key].append(item) + scoped_items_by_argkey[key].append(item) items_dict = dict.fromkeys(items, None) return list( reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session) @@ -294,8 +229,8 @@ def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: def fix_cache_order( item: nodes.Item, - argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], - items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[FixtureArgKey, None]]], + items_by_argkey: Dict[Scope, Dict[FixtureArgKey, "Deque[nodes.Item]"]], ) -> None: for scope in HIGH_SCOPES: for key in argkeys_cache[scope].get(item, []): @@ -304,13 +239,13 @@ def fix_cache_order( def reorder_items_atscope( items: Dict[nodes.Item, None], - argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], - items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], + argkeys_cache: Dict[Scope, 
Dict[nodes.Item, Dict[FixtureArgKey, None]]], + items_by_argkey: Dict[Scope, Dict[FixtureArgKey, "Deque[nodes.Item]"]], scope: Scope, ) -> Dict[nodes.Item, None]: if scope is Scope.Function or len(items) < 3: return items - ignore: Set[Optional[_Key]] = set() + ignore: Set[Optional[FixtureArgKey]] = set() items_deque = deque(items) items_done: Dict[nodes.Item, None] = {} scoped_items_by_argkey = items_by_argkey[scope] @@ -348,21 +283,35 @@ def reorder_items_atscope( return items_done -def get_direct_param_fixture_func(request: "FixtureRequest") -> Any: - return request.param +@dataclasses.dataclass(frozen=True) +class FuncFixtureInfo: + """Fixture-related information for a fixture-requesting item (e.g. test + function). + This is used to examine the fixtures which an item requests statically + (known during collection). This includes autouse fixtures, fixtures + requested by the `usefixtures` marker, fixtures requested in the function + parameters, and the transitive closure of these. + + An item may also request fixtures dynamically (using `request.getfixturevalue`); + these are not reflected here. + """ -@dataclasses.dataclass -class FuncFixtureInfo: __slots__ = ("argnames", "initialnames", "names_closure", "name2fixturedefs") - # Original function argument names. + # Fixture names that the item requests directly by function parameters. argnames: Tuple[str, ...] - # Argnames that function immediately requires. These include argnames + - # fixture names specified via usefixtures and via autouse=True in fixture - # definitions. + # Fixture names that the item immediately requires. These include + # argnames + fixture names specified via usefixtures and via autouse=True in + # fixture definitions. initialnames: Tuple[str, ...] + # The transitive closure of the fixture names that the item requires. + # Note: can't include dynamic dependencies (`request.getfixturevalue` calls). names_closure: List[str] + # A map from a fixture name in the transitive closure to the FixtureDefs + # matching the name which are applicable to this function. + # There may be multiple overriding fixtures with the same name. The + # sequence is ordered from furthest to closes to the function. name2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]] def prune_dependency_tree(self) -> None: @@ -393,25 +342,45 @@ class FuncFixtureInfo: self.names_closure[:] = sorted(closure, key=self.names_closure.index) -class FixtureRequest: - """A request for a fixture from a test or fixture function. +class FixtureRequest(abc.ABC): + """The type of the ``request`` fixture. - A request object gives access to the requesting test context and has - an optional ``param`` attribute in case the fixture is parametrized - indirectly. + A request object gives access to the requesting test context and has a + ``param`` attribute in case the fixture is parametrized. """ - def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None: + def __init__( + self, + pyfuncitem: "Function", + fixturename: Optional[str], + arg2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]], + arg2index: Dict[str, int], + fixture_defs: Dict[str, "FixtureDef[Any]"], + *, + _ispytest: bool = False, + ) -> None: check_ispytest(_ispytest) - self._pyfuncitem = pyfuncitem #: Fixture for which this request is being performed. 
- self.fixturename: Optional[str] = None - self._scope = Scope.Function - self._fixture_defs: Dict[str, FixtureDef[Any]] = {} - fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo - self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() - self._arg2index: Dict[str, int] = {} - self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager + self.fixturename: Final = fixturename + self._pyfuncitem: Final = pyfuncitem + # The FixtureDefs for each fixture name requested by this item. + # Starts from the statically-known fixturedefs resolved during + # collection. Dynamically requested fixtures (using + # `request.getfixturevalue("foo")`) are added dynamically. + self._arg2fixturedefs: Final = arg2fixturedefs + # A fixture may override another fixture with the same name, e.g. a fixture + # in a module can override a fixture in a conftest, a fixture in a class can + # override a fixture in the module, and so on. + # An overriding fixture can request its own name; in this case it gets + # the value of the fixture it overrides, one level up. + # The _arg2index state keeps the current depth in the overriding chain. + # The fixturedefs list in _arg2fixturedefs for a given name is ordered from + # furthest to closest, so we use negative indexing -1, -2, ... to go from + # last to first. + self._arg2index: Final = arg2index + # The evaluated argnames so far, mapping to the FixtureDef they resolved + # to. + self._fixture_defs: Final = fixture_defs # Notes on the type of `param`: # -`request.param` is only defined in parametrized fixtures, and will raise # AttributeError otherwise. Python typing has no notion of "undefined", so @@ -423,37 +392,31 @@ class FixtureRequest: self.param: Any @property - def scope(self) -> "_ScopeName": + def _fixturemanager(self) -> "FixtureManager": + return self._pyfuncitem.session._fixturemanager + + @property + @abc.abstractmethod + def _scope(self) -> Scope: + raise NotImplementedError() + + @property + def scope(self) -> _ScopeName: """Scope string, one of "function", "class", "module", "package", "session".""" return self._scope.value @property def fixturenames(self) -> List[str]: """Names of all active fixtures in this request.""" - result = list(self._pyfuncitem._fixtureinfo.names_closure) + result = list(self._pyfuncitem.fixturenames) result.extend(set(self._fixture_defs).difference(result)) return result @property + @abc.abstractmethod def node(self): """Underlying collection node (depends on current request scope).""" - scope = self._scope - if scope is Scope.Function: - # This might also be a non-function Item despite its attribute name. - node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem - elif scope is Scope.Package: - # FIXME: _fixturedef is not defined on FixtureRequest (this class), - # but on FixtureRequest (a subclass). - node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined] - else: - node = get_scope_node(self._pyfuncitem, scope) - if node is None and scope is Scope.Class: - # Fallback to function item itself. 
- node = self._pyfuncitem - assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( - scope, self._pyfuncitem - ) - return node + raise NotImplementedError() def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]": fixturedefs = self._arg2fixturedefs.get(argname, None) @@ -464,12 +427,17 @@ class FixtureRequest: assert self._pyfuncitem.parent is not None parentid = self._pyfuncitem.parent.nodeid fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) - # TODO: Fix this type ignore. Either add assert or adjust types. - # Can this be None here? - self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment] - # fixturedefs list is immutable so we maintain a decreasing index. + if fixturedefs is not None: + self._arg2fixturedefs[argname] = fixturedefs + # No fixtures defined with this name. + if fixturedefs is None: + raise FixtureLookupError(argname, self) + # The are no fixtures with this name applicable for the function. + if not fixturedefs: + raise FixtureLookupError(argname, self) index = self._arg2index.get(argname, 0) - 1 - if fixturedefs is None or (-index > len(fixturedefs)): + # The fixture requested its own name, but no remaining to override. + if -index > len(fixturedefs): raise FixtureLookupError(argname, self) self._arg2index[argname] = index return fixturedefs[index] @@ -502,7 +470,7 @@ class FixtureRequest: """Instance (can be None) on which test function was collected.""" # unittest support hack, see _pytest.unittest.TestCaseFunction. try: - return self._pyfuncitem._testcase + return self._pyfuncitem._testcase # type: ignore[attr-defined] except AttributeError: function = getattr(self, "function", None) return getattr(function, "__self__", None) @@ -512,15 +480,16 @@ class FixtureRequest: """Python module object where the test function was collected.""" if self.scope not in ("function", "class", "module"): raise AttributeError(f"module not available in {self.scope}-scoped context") - return self._pyfuncitem.getparent(_pytest.python.Module).obj + mod = self._pyfuncitem.getparent(_pytest.python.Module) + assert mod is not None + return mod.obj @property def path(self) -> Path: """Path where the test function was collected.""" if self.scope not in ("function", "class", "module", "package"): raise AttributeError(f"path not available in {self.scope}-scoped context") - # TODO: Remove ignore once _pyfuncitem is properly typed. - return self._pyfuncitem.path # type: ignore + return self._pyfuncitem.path @property def keywords(self) -> MutableMapping[str, Any]: @@ -533,11 +502,11 @@ class FixtureRequest: """Pytest session object.""" return self._pyfuncitem.session # type: ignore[no-any-return] + @abc.abstractmethod def addfinalizer(self, finalizer: Callable[[], object]) -> None: """Add finalizer/teardown function to be called without arguments after the last test within the requesting test context finished execution.""" - # XXX usually this method is shadowed by fixturedef specific ones. - self.node.addfinalizer(finalizer) + raise NotImplementedError() def applymarker(self, marker: Union[str, MarkDecorator]) -> None: """Apply a marker to a single test function invocation. 
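The `_arg2index` bookkeeping documented in the comments above is what drives fixture overriding: a fixture that requests its own name resolves to the definition one level further away in `_arg2fixturedefs`. A small, hypothetical sketch of the user-facing behaviour (file and fixture names are illustrative only):

    # conftest.py
    import pytest

    @pytest.fixture
    def username():
        return "base"

    # test_override.py
    import pytest

    @pytest.fixture
    def username(username):
        # Requesting our own name yields the conftest definition, one level up.
        return "mod-" + username

    def test_username(username):
        assert username == "mod-base"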
@@ -558,13 +527,6 @@ class FixtureRequest: """ raise self._fixturemanager.FixtureLookupError(None, self, msg) - def _fillfixtures(self) -> None: - item = self._pyfuncitem - fixturenames = getattr(item, "fixturenames", self.fixturenames) - for argname in fixturenames: - if argname not in item.funcargs: - item.funcargs[argname] = self.getfixturevalue(argname) - def getfixturevalue(self, argname: str) -> Any: """Dynamically run a named fixture function. @@ -592,9 +554,8 @@ class FixtureRequest: def _get_active_fixturedef( self, argname: str ) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]: - try: - return self._fixture_defs[argname] - except KeyError: + fixturedef = self._fixture_defs.get(argname) + if fixturedef is None: try: fixturedef = self._getnextfixturedef(argname) except FixtureLookupError: @@ -602,10 +563,8 @@ class FixtureRequest: cached_result = (self, [0], None) return PseudoFixtureDef(cached_result, Scope.Function) raise - # Remove indent to prevent the python3 exception - # from leaking into the call. - self._compute_fixture_value(fixturedef) - self._fixture_defs[argname] = fixturedef + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef return fixturedef def _get_fixturestack(self) -> List["FixtureDef[Any]"]: @@ -648,13 +607,9 @@ class FixtureRequest: fixtures_not_supported = getattr(funcitem, "nofuncargs", False) if has_params and fixtures_not_supported: msg = ( - "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" - "Node id: {nodeid}\n" - "Function type: {typename}" - ).format( - name=funcitem.name, - nodeid=funcitem.nodeid, - typename=type(funcitem).__name__, + f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n" + f"Node id: {funcitem.nodeid}\n" + f"Function type: {type(funcitem).__name__}" ) fail(msg, pytrace=False) if has_params: @@ -698,7 +653,97 @@ class FixtureRequest: self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest" ) -> None: # If fixture function failed it might have registered finalizers. 
- subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest)) + finalizer = functools.partial(fixturedef.finish, request=subrequest) + subrequest.node.addfinalizer(finalizer) + + +@final +class TopRequest(FixtureRequest): + """The type of the ``request`` fixture in a test function.""" + + def __init__(self, pyfuncitem: "Function", *, _ispytest: bool = False) -> None: + super().__init__( + fixturename=None, + pyfuncitem=pyfuncitem, + arg2fixturedefs=pyfuncitem._fixtureinfo.name2fixturedefs.copy(), + arg2index={}, + fixture_defs={}, + _ispytest=_ispytest, + ) + + @property + def _scope(self) -> Scope: + return Scope.Function + + @property + def node(self): + return self._pyfuncitem + + def __repr__(self) -> str: + return "<FixtureRequest for %r>" % (self.node) + + def _fillfixtures(self) -> None: + item = self._pyfuncitem + for argname in item.fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self.node.addfinalizer(finalizer) + + +@final +class SubRequest(FixtureRequest): + """The type of the ``request`` fixture in a fixture function requested + (transitively) by a test function.""" + + def __init__( + self, + request: FixtureRequest, + scope: Scope, + param: Any, + param_index: int, + fixturedef: "FixtureDef[object]", + *, + _ispytest: bool = False, + ) -> None: + super().__init__( + pyfuncitem=request._pyfuncitem, + fixturename=fixturedef.argname, + fixture_defs=request._fixture_defs, + arg2fixturedefs=request._arg2fixturedefs, + arg2index=request._arg2index, + _ispytest=_ispytest, + ) + self._parent_request: Final[FixtureRequest] = request + self._scope_field: Final = scope + self._fixturedef: Final = fixturedef + if param is not NOTSET: + self.param = param + self.param_index: Final = param_index + + def __repr__(self) -> str: + return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>" + + @property + def _scope(self) -> Scope: + return self._scope_field + + @property + def node(self): + scope = self._scope + if scope is Scope.Function: + # This might also be a non-function Item despite its attribute name. + node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem + elif scope is Scope.Package: + node = get_scope_package(self._pyfuncitem, self._fixturedef) + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope is Scope.Class: + # Fallback to function item itself. 
+ node = self._pyfuncitem + assert node, f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}' + return node def _check_scope( self, @@ -728,48 +773,13 @@ class FixtureRequest: p = bestrelpath(session.path, fs) else: p = fs - args = _format_args(factory) - lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args)) + lines.append( + "%s:%d: def %s%s" + % (p, lineno + 1, factory.__name__, inspect.signature(factory)) + ) return lines - def __repr__(self) -> str: - return "<FixtureRequest for %r>" % (self.node) - - -@final -class SubRequest(FixtureRequest): - """A sub request for handling getting a fixture from a test function/fixture.""" - - def __init__( - self, - request: "FixtureRequest", - scope: Scope, - param: Any, - param_index: int, - fixturedef: "FixtureDef[object]", - *, - _ispytest: bool = False, - ) -> None: - check_ispytest(_ispytest) - self._parent_request = request - self.fixturename = fixturedef.argname - if param is not NOTSET: - self.param = param - self.param_index = param_index - self._scope = scope - self._fixturedef = fixturedef - self._pyfuncitem = request._pyfuncitem - self._fixture_defs = request._fixture_defs - self._arg2fixturedefs = request._arg2fixturedefs - self._arg2index = request._arg2index - self._fixturemanager = request._fixturemanager - - def __repr__(self) -> str: - return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>" - def addfinalizer(self, finalizer: Callable[[], object]) -> None: - """Add finalizer/teardown function to be called without arguments after - the last test within the requesting test context finished execution.""" self._fixturedef.addfinalizer(finalizer) def _schedule_finalizers( @@ -778,7 +788,10 @@ class SubRequest(FixtureRequest): # If the executing fixturedef was not explicitly requested in the argument list (via # getfixturevalue inside the fixture call) then ensure this fixture def will be finished # first. - if fixturedef.argname not in self.fixturenames: + if ( + fixturedef.argname not in self._fixture_defs + and fixturedef.argname not in self._pyfuncitem.fixturenames + ): fixturedef.addfinalizer( functools.partial(self._fixturedef.finish, request=self) ) @@ -825,14 +838,16 @@ class FixtureLookupError(LookupError): if msg is None: fm = self.request._fixturemanager available = set() - parentid = self.request._pyfuncitem.parent.nodeid + parent = self.request._pyfuncitem.parent + assert parent is not None + parentid = parent.nodeid for name, fixturedefs in fm._arg2fixturedefs.items(): faclist = list(fm._matchfactories(fixturedefs, parentid)) if faclist: available.add(name) if self.argname in available: - msg = " recursive dependency involving fixture '{}' detected".format( - self.argname + msg = ( + f" recursive dependency involving fixture '{self.argname}' detected" ) else: msg = f"fixture '{self.argname}' not found" @@ -916,25 +931,23 @@ def _teardown_yield_fixture(fixturefunc, it) -> None: def _eval_scope_callable( - scope_callable: "Callable[[str, Config], _ScopeName]", + scope_callable: Callable[[str, Config], _ScopeName], fixture_name: str, config: Config, -) -> "_ScopeName": +) -> _ScopeName: try: # Type ignored because there is no typing mechanism to specify # keyword arguments, currently. 
result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] except Exception as e: raise TypeError( - "Error evaluating {} while defining fixture '{}'.\n" - "Expected a function with the signature (*, fixture_name, config)".format( - scope_callable, fixture_name - ) + f"Error evaluating {scope_callable} while defining fixture '{fixture_name}'.\n" + "Expected a function with the signature (*, fixture_name, config)" ) from e if not isinstance(result, str): fail( - "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n" - "{!r}".format(scope_callable, fixture_name, result), + f"Expected {scope_callable} to return a 'str' while defining fixture '{fixture_name}', but it returned:\n" + f"{result!r}", pytrace=False, ) return result @@ -942,7 +955,11 @@ def _eval_scope_callable( @final class FixtureDef(Generic[FixtureValue]): - """A container for a fixture definition.""" + """A container for a fixture definition. + + Note: At this time, only explicitly documented fields and methods are + considered public stable API. + """ def __init__( self, @@ -950,13 +967,16 @@ class FixtureDef(Generic[FixtureValue]): baseid: Optional[str], argname: str, func: "_FixtureFunc[FixtureValue]", - scope: Union[Scope, "_ScopeName", Callable[[str, Config], "_ScopeName"], None], + scope: Union[Scope, _ScopeName, Callable[[str, Config], _ScopeName], None], params: Optional[Sequence[object]], unittest: bool = False, ids: Optional[ Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]] ] = None, + *, + _ispytest: bool = False, ) -> None: + check_ispytest(_ispytest) self._fixturemanager = fixturemanager # The "base" node ID for the fixture. # @@ -972,15 +992,15 @@ class FixtureDef(Generic[FixtureValue]): # directory path relative to the rootdir. # # For other plugins, the baseid is the empty string (always matches). - self.baseid = baseid or "" + self.baseid: Final = baseid or "" # Whether the fixture was found from a node or a conftest in the # collection tree. Will be false for fixtures defined in non-conftest # plugins. - self.has_location = baseid is not None + self.has_location: Final = baseid is not None # The fixture factory function. - self.func = func + self.func: Final = func # The name by which the fixture may be requested. - self.argname = argname + self.argname: Final = argname if scope is None: scope = Scope.Function elif callable(scope): @@ -989,26 +1009,24 @@ class FixtureDef(Generic[FixtureValue]): scope = Scope.from_user( scope, descr=f"Fixture '{func.__name__}'", where=baseid ) - self._scope = scope + self._scope: Final = scope # If the fixture is directly parametrized, the parameter values. - self.params: Optional[Sequence[object]] = params + self.params: Final = params # If the fixture is directly parametrized, a tuple of explicit IDs to # assign to the parameter values, or a callable to generate an ID given # a parameter value. - self.ids = ids + self.ids: Final = ids # The names requested by the fixtures. - self.argnames = getfuncargnames(func, name=argname, is_method=unittest) + self.argnames: Final = getfuncargnames(func, name=argname, is_method=unittest) # Whether the fixture was collected from a unittest TestCase class. - # Note that it really only makes sense to define autouse fixtures in - # unittest TestCases. - self.unittest = unittest + self.unittest: Final = unittest # If the fixture was executed, the current value of the fixture. # Can change if the fixture is executed with different parameters. 
self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None - self._finalizers: List[Callable[[], object]] = [] + self._finalizers: Final[List[Callable[[], object]]] = [] @property - def scope(self) -> "_ScopeName": + def scope(self) -> _ScopeName: """Scope string, one of "function", "class", "module", "package", "session".""" return self._scope.value @@ -1036,7 +1054,7 @@ class FixtureDef(Generic[FixtureValue]): # value and remove all finalizers because they may be bound methods # which will keep instances alive. self.cached_result = None - self._finalizers = [] + self._finalizers.clear() def execute(self, request: SubRequest) -> FixtureValue: # Get required arguments and register our own finish() @@ -1050,13 +1068,13 @@ class FixtureDef(Generic[FixtureValue]): my_cache_key = self.cache_key(request) if self.cached_result is not None: + cache_key = self.cached_result[1] # note: comparison with `==` can fail (or be expensive) for e.g. # numpy arrays (#6497). - cache_key = self.cached_result[1] if my_cache_key is cache_key: if self.cached_result[2] is not None: - _, val, tb = self.cached_result[2] - raise val.with_traceback(tb) + exc = self.cached_result[2] + raise exc else: result = self.cached_result[0] return result @@ -1073,9 +1091,7 @@ class FixtureDef(Generic[FixtureValue]): return request.param_index if not hasattr(request, "param") else request.param def __repr__(self) -> str: - return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format( - self.argname, self.scope, self.baseid - ) + return f"<FixtureDef argname={self.argname!r} scope={self.scope!r} baseid={self.baseid!r}>" def resolve_fixture_function( @@ -1096,7 +1112,8 @@ def resolve_fixture_function( # Handle the case where fixture is defined not in a test class, but some other class # (for example a plugin class with a fixture), see #2270. if hasattr(fixturefunc, "__self__") and not isinstance( - request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr] + request.instance, + fixturefunc.__self__.__class__, # type: ignore[union-attr] ): return fixturefunc fixturefunc = getimfunc(fixturedef.func) @@ -1121,35 +1138,18 @@ def pytest_fixture_setup( my_cache_key = fixturedef.cache_key(request) try: result = call_fixture_func(fixturefunc, request, kwargs) - except TEST_OUTCOME: - exc_info = sys.exc_info() - assert exc_info[0] is not None - if isinstance( - exc_info[1], skip.Exception - ) and not fixturefunc.__name__.startswith("xunit_setup"): - exc_info[1]._use_item_location = True # type: ignore[attr-defined] - fixturedef.cached_result = (None, my_cache_key, exc_info) + except TEST_OUTCOME as e: + if isinstance(e, skip.Exception): + # The test requested a fixture which caused a skip. + # Don't show the fixture as the skip location, as then the user + # wouldn't know which test skipped. 
+ e._use_item_location = True + fixturedef.cached_result = (None, my_cache_key, e) raise fixturedef.cached_result = (result, my_cache_key, None) return result -def _ensure_immutable_ids( - ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]] -) -> Optional[Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]]: - if ids is None: - return None - if callable(ids): - return ids - return tuple(ids) - - -def _params_converter( - params: Optional[Iterable[object]], -) -> Optional[Tuple[object, ...]]: - return tuple(params) if params is not None else None - - def wrap_function_to_error_out_if_called_directly( function: FixtureFunction, fixture_marker: "FixtureFunctionMarker", @@ -1196,18 +1196,19 @@ class FixtureFunctionMarker: if getattr(function, "_pytestfixturefunction", False): raise ValueError( - "fixture is being applied more than once to the same function" + f"@pytest.fixture is being applied more than once to the same function {function.__name__!r}" ) + if hasattr(function, "pytestmark"): + warnings.warn(MARKED_FIXTURE, stacklevel=2) + function = wrap_function_to_error_out_if_called_directly(function, self) name = self.name or function.__name__ if name == "request": location = getlocation(function) fail( - "'request' is a reserved word for fixtures, use another name:\n {}".format( - location - ), + f"'request' is a reserved word for fixtures, use another name:\n {location}", pytrace=False, ) @@ -1373,6 +1374,31 @@ def pytest_addoption(parser: Parser) -> None: ) +def _get_direct_parametrize_args(node: nodes.Node) -> Set[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: Set[str] = set() + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.update(p_argnames) + return parametrize_argnames + + +def deduplicate_names(*seqs: Iterable[str]) -> Tuple[str, ...]: + """De-duplicate the sequence of names while keeping the original order.""" + # Ideally we would use a set, but it does not preserve insertion order. + return tuple(dict.fromkeys(name for seq in seqs for name in seq)) + + class FixtureManager: """pytest fixture definitions and information is stored and managed from this class. @@ -1410,70 +1436,75 @@ class FixtureManager: def __init__(self, session: "Session") -> None: self.session = session self.config: Config = session.config - self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {} - self._holderobjseen: Set[object] = set() + # Maps a fixture name (argname) to all of the FixtureDefs in the test + # suite/plugins defined with this name. Populated by parsefactories(). + # TODO: The order of the FixtureDefs list of each arg is significant, + # explain. + self._arg2fixturedefs: Final[Dict[str, List[FixtureDef[Any]]]] = {} + self._holderobjseen: Final[Set[object]] = set() # A mapping from a nodeid to a list of autouse fixtures it defines. 
- self._nodeid_autousenames: Dict[str, List[str]] = { + self._nodeid_autousenames: Final[Dict[str, List[str]]] = { "": self.config.getini("usefixtures"), } session.config.pluginmanager.register(self, "funcmanage") - def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]: - """Return all direct parametrization arguments of a node, so we don't - mistake them for fixtures. + def getfixtureinfo( + self, + node: nodes.Item, + func: Optional[Callable[..., object]], + cls: Optional[type], + ) -> FuncFixtureInfo: + """Calculate the :class:`FuncFixtureInfo` for an item. - Check https://github.com/pytest-dev/pytest/issues/5036. + If ``func`` is None, or if the item sets an attribute + ``nofuncargs = True``, then ``func`` is not examined at all. - These things are done later as well when dealing with parametrization - so this could be improved. + :param node: + The item requesting the fixtures. + :param func: + The item's function. + :param cls: + If the function is a method, the method's class. """ - parametrize_argnames: List[str] = [] - for marker in node.iter_markers(name="parametrize"): - if not marker.kwargs.get("indirect", False): - p_argnames, _ = ParameterSet._parse_parametrize_args( - *marker.args, **marker.kwargs - ) - parametrize_argnames.extend(p_argnames) - - return parametrize_argnames - - def getfixtureinfo( - self, node: nodes.Node, func, cls, funcargs: bool = True - ) -> FuncFixtureInfo: - if funcargs and not getattr(node, "nofuncargs", False): + if func is not None and not getattr(node, "nofuncargs", False): argnames = getfuncargnames(func, name=node.name, cls=cls) else: argnames = () + usefixturesnames = self._getusefixturesnames(node) + autousenames = self._getautousenames(node.nodeid) + initialnames = deduplicate_names(autousenames, usefixturesnames, argnames) - usefixtures = tuple( - arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args - ) - initialnames = usefixtures + argnames - fm = node.session._fixturemanager - initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( - initialnames, node, ignore_args=self._get_direct_parametrize_args(node) + direct_parametrize_args = _get_direct_parametrize_args(node) + + names_closure, arg2fixturedefs = self.getfixtureclosure( + parentnode=node, + initialnames=initialnames, + ignore_args=direct_parametrize_args, ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) - def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: - nodeid = None - try: - p = absolutepath(plugin.__file__) # type: ignore[attr-defined] - except AttributeError: - pass + def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None: + # Fixtures defined in conftest plugins are only visible to within the + # conftest's directory. This is unlike fixtures in non-conftest plugins + # which have global visibility. So for conftests, construct the base + # nodeid from the plugin name (which is the conftest path). + if plugin_name and plugin_name.endswith("conftest.py"): + # Note: we explicitly do *not* use `plugin.__file__` here -- The + # difference is that plugin_name has the correct capitalization on + # case-insensitive systems (Windows) and other normalization issues + # (issue #11816). 
+ conftestpath = absolutepath(plugin_name) + try: + nodeid = str(conftestpath.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) else: - # Construct the base nodeid which is later used to check - # what fixtures are visible for particular tests (as denoted - # by their test id). - if p.name.startswith("conftest.py"): - try: - nodeid = str(p.parent.relative_to(self.config.rootpath)) - except ValueError: - nodeid = "" - if nodeid == ".": - nodeid = "" - if os.sep != nodes.SEP: - nodeid = nodeid.replace(os.sep, nodes.SEP) + nodeid = None self.parsefactories(plugin, nodeid) @@ -1484,12 +1515,17 @@ class FixtureManager: if basenames: yield from basenames + def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]: + """Return the names of usefixtures fixtures applicable to node.""" + for mark in node.iter_markers(name="usefixtures"): + yield from mark.args + def getfixtureclosure( self, - fixturenames: Tuple[str, ...], parentnode: nodes.Node, - ignore_args: Sequence[str] = (), - ) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]: + initialnames: Tuple[str, ...], + ignore_args: AbstractSet[str], + ) -> Tuple[List[str], Dict[str, Sequence[FixtureDef[Any]]]]: # Collect the closure of all fixtures, starting with the given # fixturenames as the initial set. As we have to visit all # factory definitions anyway, we also return an arg2fixturedefs @@ -1498,19 +1534,7 @@ class FixtureManager: # (discovering matching fixtures for a given name/node is expensive). parentid = parentnode.nodeid - fixturenames_closure = list(self._getautousenames(parentid)) - - def merge(otherlist: Iterable[str]) -> None: - for arg in otherlist: - if arg not in fixturenames_closure: - fixturenames_closure.append(arg) - - merge(fixturenames) - - # At this point, fixturenames_closure contains what we call "initialnames", - # which is a set of fixturenames the function immediately requests. We - # need to return it as well, so save this. - initialnames = tuple(fixturenames_closure) + fixturenames_closure = list(initialnames) arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {} lastlen = -1 @@ -1524,7 +1548,9 @@ class FixtureManager: fixturedefs = self.getfixturedefs(argname, parentid) if fixturedefs: arg2fixturedefs[argname] = fixturedefs - merge(fixturedefs[-1].argnames) + for arg in fixturedefs[-1].argnames: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) def sort_by_scope(arg_name: str) -> Scope: try: @@ -1535,7 +1561,7 @@ class FixtureManager: return fixturedefs[-1]._scope fixturenames_closure.sort(key=sort_by_scope, reverse=True) - return initialnames, fixturenames_closure, arg2fixturedefs + return fixturenames_closure, arg2fixturedefs def pytest_generate_tests(self, metafunc: "Metafunc") -> None: """Generate new tests based on parametrized fixtures used by the given metafunc""" @@ -1671,6 +1697,7 @@ class FixtureManager: params=marker.params, unittest=unittest, ids=marker.ids, + _ispytest=True, ) faclist = self._arg2fixturedefs.setdefault(name, []) @@ -1692,11 +1719,16 @@ class FixtureManager: def getfixturedefs( self, argname: str, nodeid: str ) -> Optional[Sequence[FixtureDef[Any]]]: - """Get a list of fixtures which are applicable to the given node id. + """Get FixtureDefs for a fixture name which are applicable + to a given node. + + Returns None if there are no fixtures at all defined with the given + name. 
(This is different from the case in which there are fixtures + with the given name, but none applicable to the node. In this case, + an empty result is returned). - :param str argname: Name of the fixture to search for. - :param str nodeid: Full node id of the requesting test. - :rtype: Sequence[FixtureDef] + :param argname: Name of the fixture to search for. + :param nodeid: Full node id of the requesting test. """ try: fixturedefs = self._arg2fixturedefs[argname] diff --git a/contrib/python/pytest/py3/_pytest/freeze_support.py b/contrib/python/pytest/py3/_pytest/freeze_support.py index 9f8ea231fed..d028058e365 100644 --- a/contrib/python/pytest/py3/_pytest/freeze_support.py +++ b/contrib/python/pytest/py3/_pytest/freeze_support.py @@ -1,5 +1,6 @@ """Provides a function to report all internal modules for using freezing tools.""" + import types from typing import Iterator from typing import List diff --git a/contrib/python/pytest/py3/_pytest/helpconfig.py b/contrib/python/pytest/py3/_pytest/helpconfig.py index ea16c438823..d61c5942b5a 100644 --- a/contrib/python/pytest/py3/_pytest/helpconfig.py +++ b/contrib/python/pytest/py3/_pytest/helpconfig.py @@ -1,17 +1,19 @@ """Version info, help messages, tracing configuration.""" + +from argparse import Action import os import sys -from argparse import Action +from typing import Generator from typing import List from typing import Optional from typing import Union -import pytest from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config import PrintHelp from _pytest.config.argparsing import Parser from _pytest.terminal import TerminalReporter +import pytest class HelpAction(Action): @@ -98,10 +100,9 @@ def pytest_addoption(parser: Parser) -> None: ) -@pytest.hookimpl(hookwrapper=True) -def pytest_cmdline_parse(): - outcome = yield - config: Config = outcome.get_result() +@pytest.hookimpl(wrapper=True) +def pytest_cmdline_parse() -> Generator[None, Config, Config]: + config = yield if config.option.debug: # --debug | --debug <file.log> was provided. 
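The `pytest_cmdline_parse` conversion above follows the new-style hook wrapper protocol (`wrapper=True`): the result of the remaining hook implementations arrives as the value of the `yield` expression and must be passed on with `return`. A hedged sketch of the same pattern in plugin code (the hook choice and the timing logic are illustrative, not part of this change):

    import time
    import pytest

    @pytest.hookimpl(wrapper=True)
    def pytest_runtest_call(item):
        # Runs before the remaining hook implementations.
        start = time.monotonic()
        try:
            result = yield  # execute the rest of the hook chain
        finally:
            # Runs afterwards, even if the test raised.
            print(f"{item.nodeid} took {time.monotonic() - start:.3f}s")
        return result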
@@ -129,13 +130,13 @@ def pytest_cmdline_parse(): config.add_cleanup(unset_tracing) + return config + def showversion(config: Config) -> None: if config.option.version > 1: sys.stdout.write( - "This is pytest version {}, imported from {}\n".format( - pytest.__version__, pytest.__file__ - ) + f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" ) plugininfo = getpluginversioninfo(config) if plugininfo: diff --git a/contrib/python/pytest/py3/_pytest/hookspec.py b/contrib/python/pytest/py3/_pytest/hookspec.py index 1f7c368f792..cccd86d26c8 100644 --- a/contrib/python/pytest/py3/_pytest/hookspec.py +++ b/contrib/python/pytest/py3/_pytest/hookspec.py @@ -1,5 +1,6 @@ """Hook specifications for pytest plugins which are invoked by pytest itself and by builtin plugins.""" + from pathlib import Path from typing import Any from typing import Dict @@ -15,17 +16,19 @@ from pluggy import HookspecMarker from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK + if TYPE_CHECKING: import pdb + from typing import Literal import warnings - from typing_extensions import Literal - from _pytest._code.code import ExceptionRepr from _pytest._code.code import ExceptionInfo + from _pytest._code.code import ExceptionRepr + from _pytest.compat import LEGACY_PATH + from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config import PytestPluginManager - from _pytest.config import _PluggyPlugin from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureDef from _pytest.fixtures import SubRequest @@ -42,7 +45,6 @@ if TYPE_CHECKING: from _pytest.runner import CallInfo from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport - from _pytest.compat import LEGACY_PATH hookspec = HookspecMarker("pytest") @@ -55,26 +57,29 @@ hookspec = HookspecMarker("pytest") @hookspec(historic=True) def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None: """Called at plugin registration time to allow adding new hooks via a call to - ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + :func:`pluginmanager.add_hookspecs(module_or_class, prefix) <pytest.PytestPluginManager.add_hookspecs>`. :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager. .. note:: - This hook is incompatible with ``hookwrapper=True``. + This hook is incompatible with hook wrappers. """ @hookspec(historic=True) def pytest_plugin_registered( - plugin: "_PluggyPlugin", manager: "PytestPluginManager" + plugin: "_PluggyPlugin", + plugin_name: str, + manager: "PytestPluginManager", ) -> None: """A new pytest plugin got registered. :param plugin: The plugin module or instance. - :param pytest.PytestPluginManager manager: pytest plugin manager. + :param plugin_name: The name by which the plugin is registered. + :param manager: The pytest plugin manager. .. note:: - This hook is incompatible with ``hookwrapper=True``. + This hook is incompatible with hook wrappers. """ @@ -96,8 +101,8 @@ def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> <pytest.Parser.addini>`. 
:param pytest.PytestPluginManager pluginmanager: - The pytest plugin manager, which can be used to install :py:func:`hookspec`'s - or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks + The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s + or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks to change how command line options are added. Options can later be accessed through the @@ -113,7 +118,7 @@ def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> attribute or can be retrieved as the ``pytestconfig`` fixture. .. note:: - This hook is incompatible with ``hookwrapper=True``. + This hook is incompatible with hook wrappers. """ @@ -128,7 +133,7 @@ def pytest_configure(config: "Config") -> None: imported. .. note:: - This hook is incompatible with ``hookwrapper=True``. + This hook is incompatible with hook wrappers. :param pytest.Config config: The pytest config object. """ @@ -284,11 +289,35 @@ def pytest_ignore_collect( """ +@hookspec(firstresult=True) +def pytest_collect_directory(path: Path, parent: "Collector") -> "Optional[Collector]": + """Create a :class:`~pytest.Collector` for the given directory, or None if + not relevant. + + .. versionadded:: 8.0 + + For best results, the returned collector should be a subclass of + :class:`~pytest.Directory`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + Stops at first non-None result, see :ref:`firstresult`. + + :param path: The path to analyze. + + See :ref:`custom directory collectors` for a simple example of use of this + hook. + """ + + def pytest_collect_file( file_path: Path, path: "LEGACY_PATH", parent: "Collector" ) -> "Optional[Collector]": """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + For best results, the returned collector should be a subclass of + :class:`~pytest.File`, but this is not required. + The new node needs to have the specified ``parent`` as a parent. :param file_path: The path to analyze. @@ -858,8 +887,8 @@ def pytest_warning_recorded( """Process a warning captured by the internal pytest warnings plugin. :param warning_message: - The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains - the same attributes as the parameters of :py:func:`warnings.showwarning`. + The captured warning. This is the same object produced by :class:`warnings.catch_warnings`, + and contains the same attributes as the parameters of :py:func:`warnings.showwarning`. :param when: Indicates when the warning was captured. Possible values: @@ -940,10 +969,10 @@ def pytest_exception_interact( interactively handled. May be called during collection (see :hook:`pytest_make_collect_report`), - in which case ``report`` is a :class:`CollectReport`. + in which case ``report`` is a :class:`~pytest.CollectReport`. May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), - in which case ``report`` is a :class:`TestReport`. + in which case ``report`` is a :class:`~pytest.TestReport`. This hook is not called if the exception that was raised is an internal exception like ``skip.Exception``. 
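The new pytest_collect_directory hook spec above is the extension point behind the "custom directory collectors" reference. A minimal sketch of how a plugin could use it, with hypothetical names (ManifestDir and the MANIFEST file are invented for illustration); returning None falls back to the default Dir/Package collectors, per the spec's firstresult semantics:

    # conftest.py -- hypothetical example; not part of the diff.
    from pathlib import Path
    from typing import Iterable, Optional, Union

    import pytest


    class ManifestDir(pytest.Directory):
        """Collect only the files listed in this directory's MANIFEST file."""

        def collect(self) -> Iterable[Union[pytest.Item, pytest.Collector]]:
            for name in (self.path / "MANIFEST").read_text().splitlines():
                file_path = self.path / name.strip()
                # Delegate per-file collection to the normal file hook.
                yield from self.ihook.pytest_collect_file(
                    file_path=file_path, parent=self
                )


    def pytest_collect_directory(
        path: Path, parent: pytest.Collector
    ) -> Optional[pytest.Collector]:
        if (path / "MANIFEST").is_file():
            return ManifestDir.from_parent(parent=parent, path=path)
        return None  # fall through to the built-in collectors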
diff --git a/contrib/python/pytest/py3/_pytest/junitxml.py b/contrib/python/pytest/py3/_pytest/junitxml.py index 9ee35b84e84..9009ec7aa0a 100644 --- a/contrib/python/pytest/py3/_pytest/junitxml.py +++ b/contrib/python/pytest/py3/_pytest/junitxml.py @@ -6,12 +6,12 @@ Based on initial code from Ross Lawley. Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd """ + +from datetime import datetime import functools import os import platform import re -import xml.etree.ElementTree as ET -from datetime import datetime from typing import Callable from typing import Dict from typing import List @@ -19,8 +19,8 @@ from typing import Match from typing import Optional from typing import Tuple from typing import Union +import xml.etree.ElementTree as ET -import pytest from _pytest import nodes from _pytest import timing from _pytest._code.code import ExceptionRepr @@ -32,6 +32,7 @@ from _pytest.fixtures import FixtureRequest from _pytest.reports import TestReport from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter +import pytest xml_key = StashKey["LogXML"]() @@ -248,7 +249,9 @@ class _NodeReporter: skipreason = skipreason[9:] details = f"{filename}:{lineno}: {skipreason}" - skipped = ET.Element("skipped", type="pytest.skip", message=skipreason) + skipped = ET.Element( + "skipped", type="pytest.skip", message=bin_xml_escape(skipreason) + ) skipped.text = bin_xml_escape(details) self.append(skipped) self.write_captured_output(report) @@ -271,9 +274,7 @@ def _warn_incompatibility_with_xunit2( if xml is not None and xml.family not in ("xunit1", "legacy"): request.node.warn( PytestWarning( - "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( - fixture_name=fixture_name, family=xml.family - ) + f"{fixture_name} is incompatible with junit_family '{xml.family}' (use 'legacy' or 'xunit1')" ) ) @@ -365,7 +366,6 @@ def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object] `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See :issue:`7767` for details. 
""" - __tracebackhide__ = True def record_func(name: str, value: object) -> None: diff --git a/contrib/python/pytest/py3/_pytest/legacypath.py b/contrib/python/pytest/py3/_pytest/legacypath.py index af1d0c07e3c..c459c59aac3 100644 --- a/contrib/python/pytest/py3/_pytest/legacypath.py +++ b/contrib/python/pytest/py3/_pytest/legacypath.py @@ -1,8 +1,11 @@ """Add backward compatibility support for the legacy py path type.""" + import dataclasses +from pathlib import Path import shlex import subprocess -from pathlib import Path +from typing import Final +from typing import final from typing import List from typing import Optional from typing import TYPE_CHECKING @@ -11,7 +14,6 @@ from typing import Union from iniconfig import SectionWrapper from _pytest.cacheprovider import Cache -from _pytest.compat import final from _pytest.compat import LEGACY_PATH from _pytest.compat import legacy_path from _pytest.config import Config @@ -31,9 +33,8 @@ from _pytest.pytester import RunResult from _pytest.terminal import TerminalReporter from _pytest.tmpdir import TempPathFactory -if TYPE_CHECKING: - from typing_extensions import Final +if TYPE_CHECKING: import pexpect @@ -89,7 +90,6 @@ class Testdir: return self._pytester.chdir() def finalize(self) -> None: - """See :meth:`Pytester._finalize`.""" return self._pytester._finalize() def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: @@ -270,7 +270,7 @@ class LegacyTestdirPlugin: @final @dataclasses.dataclass class TempdirFactory: - """Backward compatibility wrapper that implements :class:`py.path.local` + """Backward compatibility wrapper that implements ``py.path.local`` for :class:`TempPathFactory`. .. note:: @@ -289,11 +289,11 @@ class TempdirFactory: self._tmppath_factory = tmppath_factory def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: - """Same as :meth:`TempPathFactory.mktemp`, but returns a :class:`py.path.local` object.""" + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) def getbasetemp(self) -> LEGACY_PATH: - """Same as :meth:`TempPathFactory.getbasetemp`, but returns a :class:`py.path.local` object.""" + """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object.""" return legacy_path(self._tmppath_factory.getbasetemp().resolve()) diff --git a/contrib/python/pytest/py3/_pytest/logging.py b/contrib/python/pytest/py3/_pytest/logging.py index 9f2f1c79359..ad7c2dfff42 100644 --- a/contrib/python/pytest/py3/_pytest/logging.py +++ b/contrib/python/pytest/py3/_pytest/logging.py @@ -1,23 +1,29 @@ """Access and control log capturing.""" -import io -import logging -import os -import re + from contextlib import contextmanager from contextlib import nullcontext from datetime import datetime from datetime import timedelta from datetime import timezone +import io from io import StringIO +import logging from logging import LogRecord +import os from pathlib import Path +import re +from types import TracebackType from typing import AbstractSet from typing import Dict +from typing import final from typing import Generator +from typing import Generic from typing import List +from typing import Literal from typing import Mapping from typing import Optional from typing import Tuple +from typing import Type from typing import TYPE_CHECKING from typing import TypeVar from typing import Union @@ -25,7 +31,6 @@ from typing import Union from _pytest import nodes from _pytest._io import TerminalWriter 
from _pytest.capture import CaptureManager -from _pytest.compat import final from _pytest.config import _strtobool from _pytest.config import Config from _pytest.config import create_terminal_writer @@ -39,10 +44,9 @@ from _pytest.main import Session from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter + if TYPE_CHECKING: logging_StreamHandler = logging.StreamHandler[StringIO] - - from typing_extensions import Literal else: logging_StreamHandler = logging.StreamHandler @@ -63,13 +67,14 @@ class DatetimeFormatter(logging.Formatter): :func:`time.strftime` in case of microseconds in format string. """ - def formatTime(self, record: LogRecord, datefmt=None) -> str: + def formatTime(self, record: LogRecord, datefmt: Optional[str] = None) -> str: if datefmt and "%f" in datefmt: ct = self.converter(record.created) tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) # Construct `datetime.datetime` object from `struct_time` # and msecs information from `record` - dt = datetime(*ct[0:6], microsecond=round(record.msecs * 1000), tzinfo=tz) + # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861). + dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz) return dt.strftime(datefmt) # Use `logging.Formatter` for non-microsecond formats return super().formatTime(record, datefmt) @@ -112,7 +117,6 @@ class ColoredLevelFormatter(DatetimeFormatter): .. warning:: This is an experimental API. """ - assert self._fmt is not None levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) if not levelname_fmt_match: @@ -179,7 +183,6 @@ class PercentStyleMultiline(logging.PercentStyle): 0 (auto-indent turned off) or >0 (explicitly set indentation position). """ - if auto_indent_option is None: return 0 elif isinstance(auto_indent_option, bool): @@ -304,13 +307,13 @@ def pytest_addoption(parser: Parser) -> None: add_option_ini( "--log-file-format", dest="log_file_format", - default=DEFAULT_LOG_FORMAT, + default=None, help="Log format used by the logging module", ) add_option_ini( "--log-file-date-format", dest="log_file_date_format", - default=DEFAULT_LOG_DATE_FORMAT, + default=None, help="Log date format used by the logging module", ) add_option_ini( @@ -332,7 +335,7 @@ _HandlerType = TypeVar("_HandlerType", bound=logging.Handler) # Not using @contextmanager for performance reasons. -class catching_logs: +class catching_logs(Generic[_HandlerType]): """Context manager that prepares the whole logging machinery properly.""" __slots__ = ("handler", "level", "orig_level") @@ -341,7 +344,7 @@ class catching_logs: self.handler = handler self.level = level - def __enter__(self): + def __enter__(self) -> _HandlerType: root_logger = logging.getLogger() if self.level is not None: self.handler.setLevel(self.level) @@ -351,7 +354,12 @@ class catching_logs: root_logger.setLevel(min(self.orig_level, self.level)) return self.handler - def __exit__(self, type, value, traceback): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: root_logger = logging.getLogger() if self.level is not None: root_logger.setLevel(self.orig_level) @@ -422,7 +430,7 @@ class LogCaptureFixture: return self._item.stash[caplog_handler_key] def get_records( - self, when: "Literal['setup', 'call', 'teardown']" + self, when: Literal["setup", "call", "teardown"] ) -> List[logging.LogRecord]: """Get the logging records for one of the possible test phases. 
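caplog.get_records() above now takes a plain typing.Literal for the phase name. A small usage sketch (file, fixture, and logger names are invented, and it assumes the default WARNING capture level) showing how a test can inspect records captured during fixture setup separately from the call phase:

    # test_phases.py -- hypothetical usage example, not from the diff.
    import logging

    import pytest

    logger = logging.getLogger(__name__)


    @pytest.fixture
    def noisy_setup():
        logger.warning("emitted during setup")
        yield


    def test_phase_records(noisy_setup, caplog):
        logger.error("emitted during call")
        setup_messages = [r.getMessage() for r in caplog.get_records("setup")]
        assert "emitted during setup" in setup_messages
        assert "emitted during call" in caplog.text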
@@ -523,7 +531,7 @@ class LogCaptureFixture: The levels of the loggers changed by this function will be restored to their initial values at the end of the test. - Will enable the requested logging level if it was disabled via :meth:`logging.disable`. + Will enable the requested logging level if it was disabled via :func:`logging.disable`. :param level: The level. :param logger: The logger to update. If not given, the root logger. @@ -547,7 +555,7 @@ class LogCaptureFixture: the end of the 'with' statement the level is restored to its original value. - Will enable the requested logging level if it was disabled via :meth:`logging.disable`. + Will enable the requested logging level if it was disabled via :func:`logging.disable`. :param level: The level. :param logger: The logger to update. If not given, the root logger. @@ -565,6 +573,22 @@ class LogCaptureFixture: self.handler.setLevel(handler_orig_level) logging.disable(original_disable_level) + @contextmanager + def filtering(self, filter_: logging.Filter) -> Generator[None, None, None]: + """Context manager that temporarily adds the given filter to the caplog's + :meth:`handler` for the 'with' statement block, and removes that filter at the + end of the block. + + :param filter_: A custom :class:`logging.Filter` object. + + .. versionadded:: 7.5 + """ + self.handler.addFilter(filter_) + try: + yield + finally: + self.handler.removeFilter(filter_) + @fixture def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]: @@ -600,9 +624,9 @@ def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[i except ValueError as e: # Python logging does not recognise this as a logging level raise UsageError( - "'{}' is not recognized as a logging level name for " - "'{}'. Please consider passing the " - "logging level num instead.".format(log_level, setting_name) + f"'{log_level}' is not recognized as a logging level name for " + f"'{setting_name}'. Please consider passing the " + "logging level num instead." ) from e @@ -636,7 +660,9 @@ class LoggingPlugin: self.report_handler.setFormatter(self.formatter) # File logging. 
- self.log_file_level = get_log_level_for_setting(config, "log_file_level") + self.log_file_level = get_log_level_for_setting( + config, "log_file_level", "log_level" + ) log_file = get_option_ini(config, "log_file") or os.devnull if log_file != os.devnull: directory = os.path.dirname(os.path.abspath(log_file)) @@ -725,7 +751,7 @@ class LoggingPlugin: if old_stream: old_stream.close() - def _log_cli_enabled(self): + def _log_cli_enabled(self) -> bool: """Return whether live logging is enabled.""" enabled = self._config.getoption( "--log-cli-level" @@ -740,27 +766,26 @@ class LoggingPlugin: return True - @hookimpl(hookwrapper=True, tryfirst=True) + @hookimpl(wrapper=True, tryfirst=True) def pytest_sessionstart(self) -> Generator[None, None, None]: self.log_cli_handler.set_when("sessionstart") with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): - yield + return (yield) - @hookimpl(hookwrapper=True, tryfirst=True) + @hookimpl(wrapper=True, tryfirst=True) def pytest_collection(self) -> Generator[None, None, None]: self.log_cli_handler.set_when("collection") with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): - yield + return (yield) - @hookimpl(hookwrapper=True) - def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]: + @hookimpl(wrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]: if session.config.option.collectonly: - yield - return + return (yield) if self._log_cli_enabled() and self._config.getoption("verbose") < 1: # The verbose flag is needed to avoid messy test progress output. @@ -768,7 +793,7 @@ class LoggingPlugin: with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): - yield # Run all the tests. + return (yield) # Run all the tests. 
@hookimpl def pytest_runtest_logstart(self) -> None: @@ -793,12 +818,13 @@ class LoggingPlugin: item.stash[caplog_records_key][when] = caplog_handler.records item.stash[caplog_handler_key] = caplog_handler - yield - - log = report_handler.stream.getvalue().strip() - item.add_report_section(when, "log", log) + try: + yield + finally: + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]: self.log_cli_handler.set_when("setup") @@ -806,31 +832,33 @@ class LoggingPlugin: item.stash[caplog_records_key] = empty yield from self._runtest_for(item, "setup") - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]: self.log_cli_handler.set_when("call") yield from self._runtest_for(item, "call") - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]: self.log_cli_handler.set_when("teardown") - yield from self._runtest_for(item, "teardown") - del item.stash[caplog_records_key] - del item.stash[caplog_handler_key] + try: + yield from self._runtest_for(item, "teardown") + finally: + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] @hookimpl def pytest_runtest_logfinish(self) -> None: self.log_cli_handler.set_when("finish") - @hookimpl(hookwrapper=True, tryfirst=True) + @hookimpl(wrapper=True, tryfirst=True) def pytest_sessionfinish(self) -> Generator[None, None, None]: self.log_cli_handler.set_when("sessionfinish") with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): - yield + return (yield) @hookimpl def pytest_unconfigure(self) -> None: diff --git a/contrib/python/pytest/py3/_pytest/main.py b/contrib/python/pytest/py3/_pytest/main.py index ea89a63fa1b..fd9dddfa318 100644 --- a/contrib/python/pytest/py3/_pytest/main.py +++ b/contrib/python/pytest/py3/_pytest/main.py @@ -1,29 +1,33 @@ """Core implementation of the testing process: init, session, runtest loop.""" + import argparse import dataclasses import fnmatch import functools import importlib import os -import sys from pathlib import Path +import sys +from typing import AbstractSet from typing import Callable from typing import Dict +from typing import final from typing import FrozenSet +from typing import Iterable from typing import Iterator from typing import List +from typing import Literal from typing import Optional +from typing import overload from typing import Sequence -from typing import Set from typing import Tuple -from typing import Type -from typing import TYPE_CHECKING from typing import Union +import warnings + +import pluggy -import _pytest._code from _pytest import nodes -from _pytest.compat import final -from _pytest.compat import overload +import _pytest._code from _pytest.config import Config from _pytest.config import directory_arg from _pytest.config import ExitCode @@ -31,21 +35,19 @@ from _pytest.config import hookimpl from _pytest.config import PytestPluginManager from _pytest.config import UsageError from _pytest.config.argparsing import Parser +from _pytest.config.compat import PathAwareHookProxy from _pytest.fixtures import FixtureManager from _pytest.outcomes import exit from _pytest.pathlib import absolutepath from _pytest.pathlib import bestrelpath from _pytest.pathlib import 
fnmatch_ex from _pytest.pathlib import safe_exists -from _pytest.pathlib import visit +from _pytest.pathlib import scandir from _pytest.reports import CollectReport from _pytest.reports import TestReport from _pytest.runner import collect_one_node from _pytest.runner import SetupState - - -if TYPE_CHECKING: - from typing_extensions import Literal +from _pytest.warning_types import PytestWarning def pytest_addoption(parser: Parser) -> None: @@ -377,7 +379,7 @@ def _in_venv(path: Path) -> bool: def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]: ignore_paths = config._getconftest_pathlist( - "collect_ignore", path=collection_path.parent, rootpath=config.rootpath + "collect_ignore", path=collection_path.parent ) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") @@ -388,7 +390,7 @@ def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[boo return True ignore_globs = config._getconftest_pathlist( - "collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath + "collect_ignore_glob", path=collection_path.parent ) ignore_globs = ignore_globs or [] excludeglobopt = config.getoption("ignore_glob") @@ -410,6 +412,12 @@ def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[boo return None +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> Optional[nodes.Collector]: + return Dir.from_parent(parent, path=path) + + def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None: deselect_prefixes = tuple(config.getoption("deselect") or []) if not deselect_prefixes: @@ -429,11 +437,15 @@ def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> No class FSHookProxy: - def __init__(self, pm: PytestPluginManager, remove_mods) -> None: + def __init__( + self, + pm: PytestPluginManager, + remove_mods: AbstractSet[object], + ) -> None: self.pm = pm self.remove_mods = remove_mods - def __getattr__(self, name: str): + def __getattr__(self, name: str) -> pluggy.HookCaller: x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) self.__dict__[name] = x return x @@ -462,7 +474,60 @@ class _bestrelpath_cache(Dict[Path, str]): @final -class Session(nodes.FSCollector): +class Dir(nodes.Directory): + """Collector of files in a file system directory. + + .. versionadded:: 8.0 + + .. note:: + + Python directories with an `__init__.py` file are instead collected by + :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` + collectors. + """ + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: nodes.Collector, # type: ignore[override] + *, + path: Path, + ) -> "Dir": + """The public constructor. + + :param parent: The parent collector of this Dir. + :param path: The directory's path. 
+ """ + return super().from_parent(parent=parent, path=path) # type: ignore[no-any-return] + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + config = self.config + col: Optional[nodes.Collector] + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path): + if direntry.is_dir(): + if direntry.name == "__pycache__": + continue + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +@final +class Session(nodes.Collector): """The root of the collection tree. ``Session`` collects the initial paths given as arguments to pytest. @@ -478,6 +543,7 @@ class Session(nodes.FSCollector): def __init__(self, config: Config) -> None: super().__init__( + name="", path=config.rootpath, fspath=None, parent=None, @@ -487,10 +553,15 @@ class Session(nodes.FSCollector): ) self.testsfailed = 0 self.testscollected = 0 - self.shouldstop: Union[bool, str] = False - self.shouldfail: Union[bool, str] = False + self._shouldstop: Union[bool, str] = False + self._shouldfail: Union[bool, str] = False self.trace = config.trace.root.get("collection") self._initialpaths: FrozenSet[Path] = frozenset() + self._initialpaths_with_parents: FrozenSet[Path] = frozenset() + self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: List[Tuple[Path, List[str]]] = [] + self._collection_cache: Dict[nodes.Collector, CollectReport] = {} + self.items: List[nodes.Item] = [] self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath) @@ -511,6 +582,42 @@ class Session(nodes.FSCollector): ) @property + def shouldstop(self) -> Union[bool, str]: + return self._shouldstop + + @shouldstop.setter + def shouldstop(self, value: Union[bool, str]) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldstop: + warnings.warn( + PytestWarning( + "session.shouldstop cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldstop = value + + @property + def shouldfail(self) -> Union[bool, str]: + return self._shouldfail + + @shouldfail.setter + def shouldfail(self, value: Union[bool, str]) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldfail: + warnings.warn( + PytestWarning( + "session.shouldfail cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldfail = value + + @property def startpath(self) -> Path: """The path from which pytest was invoked. @@ -541,65 +648,77 @@ class Session(nodes.FSCollector): pytest_collectreport = pytest_runtest_logreport - def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + def isinitpath( + self, + path: Union[str, "os.PathLike[str]"], + *, + with_parents: bool = False, + ) -> bool: + """Is path an initial path? + + An initial path is a path explicitly given to pytest on the command + line. 
+ + :param with_parents: + If set, also return True if the path is a parent of an initial path. + + .. versionchanged:: 8.0 + Added the ``with_parents`` parameter. + """ # Optimization: Path(Path(...)) is much slower than isinstance. path_ = path if isinstance(path, Path) else Path(path) - return path_ in self._initialpaths + if with_parents: + return path_ in self._initialpaths_with_parents + else: + return path_ in self._initialpaths - def gethookproxy(self, fspath: "os.PathLike[str]"): + def gethookproxy(self, fspath: "os.PathLike[str]") -> pluggy.HookRelay: # Optimization: Path(Path(...)) is much slower than isinstance. path = fspath if isinstance(fspath, Path) else Path(fspath) pm = self.config.pluginmanager # Check if we have the common case of running # hooks with all conftest.py files. - my_conftestmodules = pm._getconftestmodules( - path, - self.config.getoption("importmode"), - rootpath=self.config.rootpath, - ) + my_conftestmodules = pm._getconftestmodules(path) remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + proxy: pluggy.HookRelay if remove_mods: - # One or more conftests are not in use at this fspath. - from .config.compat import PathAwareHookProxy - - proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) + # One or more conftests are not in use at this path. + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment] else: # All plugins are active for this fspath. proxy = self.config.hook return proxy - def _recurse(self, direntry: "os.DirEntry[str]") -> bool: - if direntry.name == "__pycache__": - return False - fspath = Path(direntry.path) - ihook = self.gethookproxy(fspath.parent) - if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): - return False - return True - - def _collectfile( - self, fspath: Path, handle_dupes: bool = True + def _collect_path( + self, + path: Path, + path_cache: Dict[Path, Sequence[nodes.Collector]], ) -> Sequence[nodes.Collector]: - assert ( - fspath.is_file() - ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( - fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() - ) - ihook = self.gethookproxy(fspath) - if not self.isinitpath(fspath): - if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): - return () + """Create a Collector for the given path. - if handle_dupes: - keepduplicates = self.config.getoption("keepduplicates") - if not keepduplicates: - duplicate_paths = self.config.pluginmanager._duplicatepaths - if fspath in duplicate_paths: - return () - else: - duplicate_paths.add(fspath) + `path_cache` makes it so the same Collectors are returned for the same + path. + """ + if path in path_cache: + return path_cache[path] + + if path.is_dir(): + ihook = self.gethookproxy(path.parent) + col: Optional[nodes.Collector] = ihook.pytest_collect_directory( + path=path, parent=self + ) + cols: Sequence[nodes.Collector] = (col,) if col is not None else () - return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] + elif path.is_file(): + ihook = self.gethookproxy(path) + cols = ihook.pytest_collect_file(file_path=path, parent=self) + + else: + # Broken symlink or invalid/missing file. 
+ cols = () + + path_cache[path] = cols + return cols @overload def perform_collect( @@ -635,15 +754,16 @@ class Session(nodes.FSCollector): self.trace("perform_collect", self, args) self.trace.root.indent += 1 - self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] - self._initial_parts: List[Tuple[Path, List[str]]] = [] - self.items: List[nodes.Item] = [] - hook = self.config.hook + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + self.items = [] items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items try: initialpaths: List[Path] = [] + initialpaths_with_parents: List[Path] = [] for arg in args: fspath, parts = resolve_collection_argument( self.config.invocation_params.dir, @@ -652,7 +772,11 @@ class Session(nodes.FSCollector): ) self._initial_parts.append((fspath, parts)) initialpaths.append(fspath) + initialpaths_with_parents.append(fspath) + initialpaths_with_parents.extend(fspath.parents) self._initialpaths = frozenset(initialpaths) + self._initialpaths_with_parents = frozenset(initialpaths_with_parents) + rep = collect_one_node(self) self.ihook.pytest_collectreport(report=rep) self.trace.root.indent -= 1 @@ -661,12 +785,13 @@ class Session(nodes.FSCollector): for arg, collectors in self._notfound: if collectors: errors.append( - f"not found: {arg}\n(no name {arg!r} in any of {collectors!r})" + f"not found: {arg}\n(no match in any of {collectors!r})" ) else: errors.append(f"found no collectors for {arg}") raise UsageError(*errors) + if not genitems: items = rep.result else: @@ -679,154 +804,126 @@ class Session(nodes.FSCollector): session=self, config=self.config, items=items ) finally: + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} hook.pytest_collection_finish(session=self) - self.testscollected = len(items) - return items + if genitems: + self.testscollected = len(items) - def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]: - from _pytest.python import Package + return items - # Keep track of any collected nodes in here, so we don't duplicate fixtures. - node_cache1: Dict[Path, Sequence[nodes.Collector]] = {} - node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {} + def _collect_one_node( + self, + node: nodes.Collector, + handle_dupes: bool = True, + ) -> Tuple[CollectReport, bool]: + if node in self._collection_cache and handle_dupes: + rep = self._collection_cache[node] + return rep, True + else: + rep = collect_one_node(node) + self._collection_cache[node] = rep + return rep, False - # Keep track of any collected collectors in matchnodes paths, so they - # are not collected more than once. - matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {} + def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]: + # This is a cache for the root directories of the initial paths. + # We can't use collection_cache for Session because of its special + # role as the bootstrapping collector. + path_cache: Dict[Path, Sequence[nodes.Collector]] = {} - # Directories of pkgs with dunder-init files. - pkg_roots: Dict[Path, Package] = {} + pm = self.config.pluginmanager for argpath, names in self._initial_parts: self.trace("processing argument", (argpath, names)) self.trace.root.indent += 1 - # Start with a Session root, and delve to argpath item (dir or file) - # and stack all Packages found on the way. - # No point in finding packages when collecting doctests. 
- if not self.config.getoption("doctestmodules", False): - pm = self.config.pluginmanager - for parent in (argpath, *argpath.parents): - if not pm._is_in_confcutdir(argpath): - break - - if parent.is_dir(): - pkginit = parent / "__init__.py" - if pkginit.is_file() and pkginit not in node_cache1: - col = self._collectfile(pkginit, handle_dupes=False) - if col: - if isinstance(col[0], Package): - pkg_roots[parent] = col[0] - node_cache1[col[0].path] = [col[0]] - - # If it's a directory argument, recurse and look for any Subpackages. - # Let the Package collector deal with subnodes, don't collect here. + # resolve_collection_argument() ensures this. if argpath.is_dir(): assert not names, f"invalid arg {(argpath, names)!r}" - seen_dirs: Set[Path] = set() - for direntry in visit(argpath, self._recurse): - if not direntry.is_file(): - continue - - path = Path(direntry.path) - dirpath = path.parent - - if dirpath not in seen_dirs: - # Collect packages first. - seen_dirs.add(dirpath) - pkginit = dirpath / "__init__.py" - if pkginit.exists(): - for x in self._collectfile(pkginit): - yield x - if isinstance(x, Package): - pkg_roots[dirpath] = x - if dirpath in pkg_roots: - # Do not collect packages here. - continue + # Match the argpath from the root, e.g. + # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py] + paths = [*reversed(argpath.parents), argpath] + # Paths outside of the confcutdir should not be considered, unless + # it's the argpath itself. + while len(paths) > 1 and not pm._is_in_confcutdir(paths[0]): + paths = paths[1:] + + # Start going over the parts from the root, collecting each level + # and discarding all nodes which don't match the level's part. + any_matched_in_initial_part = False + notfound_collectors = [] + work: List[ + Tuple[Union[nodes.Collector, nodes.Item], List[Union[Path, str]]] + ] = [(self, paths + names)] + while work: + matchnode, matchparts = work.pop() + + # Pop'd all of the parts, this is a match. + if not matchparts: + yield matchnode + any_matched_in_initial_part = True + continue - for x in self._collectfile(path): - key2 = (type(x), x.path) - if key2 in node_cache2: - yield node_cache2[key2] - else: - node_cache2[key2] = x - yield x - else: - assert argpath.is_file() + # Should have been matched by now, discard. + if not isinstance(matchnode, nodes.Collector): + continue - if argpath in node_cache1: - col = node_cache1[argpath] + # Collect this level of matching. + # Collecting Session (self) is done directly to avoid endless + # recursion to this function. 
+ subnodes: Sequence[Union[nodes.Collector, nodes.Item]] + if isinstance(matchnode, Session): + assert isinstance(matchparts[0], Path) + subnodes = matchnode._collect_path(matchparts[0], path_cache) else: - collect_root = pkg_roots.get(argpath.parent, self) - col = collect_root._collectfile(argpath, handle_dupes=False) - if col: - node_cache1[argpath] = col - - matching = [] - work: List[ - Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]] - ] = [(col, names)] - while work: - self.trace("matchnodes", col, names) - self.trace.root.indent += 1 - - matchnodes, matchnames = work.pop() - for node in matchnodes: - if not matchnames: - matching.append(node) - continue - if not isinstance(node, nodes.Collector): - continue - key = (type(node), node.nodeid) - if key in matchnodes_cache: - rep = matchnodes_cache[key] - else: - rep = collect_one_node(node) - matchnodes_cache[key] = rep - if rep.passed: - submatchnodes = [] - for r in rep.result: - # TODO: Remove parametrized workaround once collection structure contains - # parametrization. - if ( - r.name == matchnames[0] - or r.name.split("[")[0] == matchnames[0] - ): - submatchnodes.append(r) - if submatchnodes: - work.append((submatchnodes, matchnames[1:])) - else: - # Report collection failures here to avoid failing to run some test - # specified in the command line because the module could not be - # imported (#134). - node.ihook.pytest_collectreport(report=rep) - - self.trace("matchnodes finished -> ", len(matching), "nodes") - self.trace.root.indent -= 1 - - if not matching: - report_arg = "::".join((str(argpath), *names)) - self._notfound.append((report_arg, col)) - continue + # For backward compat, files given directly multiple + # times on the command line should not be deduplicated. + handle_dupes = not ( + len(matchparts) == 1 + and isinstance(matchparts[0], Path) + and matchparts[0].is_file() + ) + rep, duplicate = self._collect_one_node(matchnode, handle_dupes) + if not duplicate and not rep.passed: + # Report collection failures here to avoid failing to + # run some test specified in the command line because + # the module could not be imported (#134). + matchnode.ihook.pytest_collectreport(report=rep) + if not rep.passed: + continue + subnodes = rep.result + + # Prune this level. + any_matched_in_collector = False + for node in reversed(subnodes): + # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`. + if isinstance(matchparts[0], Path): + is_match = node.path == matchparts[0] + if sys.platform == "win32" and not is_match: + # In case the file paths do not match, fallback to samefile() to + # account for short-paths on Windows (#11895). + is_match = os.path.samefile(node.path, matchparts[0]) + # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. + else: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. + is_match = ( + node.name == matchparts[0] + or node.name.split("[")[0] == matchparts[0] + ) + if is_match: + work.append((node, matchparts[1:])) + any_matched_in_collector = True - # If __init__.py was the only file requested, then the matched - # node will be the corresponding Package (by default), and the - # first yielded item will be the __init__ Module itself, so - # just use that. If this special case isn't taken, then all the - # files in the package will be yielded. 
- if argpath.name == "__init__.py" and isinstance(matching[0], Package): - try: - yield next(iter(matching[0].collect())) - except StopIteration: - # The package collects nothing with only an __init__.py - # file in it, which gets ignored by the default - # "python_files" option. - pass - continue + if not any_matched_in_collector: + notfound_collectors.append(matchnode) - yield from matching + if not any_matched_in_initial_part: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, notfound_collectors)) self.trace.root.indent -= 1 @@ -839,11 +936,17 @@ class Session(nodes.FSCollector): yield node else: assert isinstance(node, nodes.Collector) - rep = collect_one_node(node) + keepduplicates = self.config.getoption("keepduplicates") + # For backward compat, dedup only applies to files. + handle_dupes = not (keepduplicates and isinstance(node, nodes.File)) + rep, duplicate = self._collect_one_node(node, handle_dupes) + if duplicate and not keepduplicates: + return if rep.passed: for subnode in rep.result: yield from self.genitems(subnode) - node.ihook.pytest_collectreport(report=rep) + if not duplicate: + node.ihook.pytest_collectreport(report=rep) def search_pypath(module_name: str) -> str: diff --git a/contrib/python/pytest/py3/_pytest/mark/__init__.py b/contrib/python/pytest/py3/_pytest/mark/__init__.py index de46b4c8a75..4bd9f6563c7 100644 --- a/contrib/python/pytest/py3/_pytest/mark/__init__.py +++ b/contrib/python/pytest/py3/_pytest/mark/__init__.py @@ -1,4 +1,5 @@ """Generic mechanism for marking and selecting python functions.""" + import dataclasses from typing import AbstractSet from typing import Collection @@ -23,6 +24,7 @@ from _pytest.config import UsageError from _pytest.config.argparsing import Parser from _pytest.stash import StashKey + if TYPE_CHECKING: from _pytest.nodes import Item @@ -152,12 +154,19 @@ class KeywordMatcher: def from_item(cls, item: "Item") -> "KeywordMatcher": mapped_names = set() - # Add the names of the current item and any parent items. + # Add the names of the current item and any parent items, + # except the Session and root Directory's which are not + # interesting for matching. import pytest for node in item.listchain(): - if not isinstance(node, pytest.Session): - mapped_names.add(node.name) + if isinstance(node, pytest.Session): + continue + if isinstance(node, pytest.Directory) and isinstance( + node.parent, pytest.Session + ): + continue + mapped_names.add(node.name) # Add the names added as extra keywords to current or parent items. mapped_names.update(item.listextrakeywords()) @@ -260,8 +269,8 @@ def pytest_configure(config: Config) -> None: if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): raise UsageError( - "{!s} must be one of skip, xfail or fail_at_collect" - " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + f"{EMPTY_PARAMETERSET_OPTION!s} must be one of skip, xfail or fail_at_collect" + f" but it is {empty_parameterset!r}" ) diff --git a/contrib/python/pytest/py3/_pytest/mark/expression.py b/contrib/python/pytest/py3/_pytest/mark/expression.py index 9287bcee50c..78b7fda696b 100644 --- a/contrib/python/pytest/py3/_pytest/mark/expression.py +++ b/contrib/python/pytest/py3/_pytest/mark/expression.py @@ -14,11 +14,11 @@ The semantics are: - ident evaluates to True of False according to a provided matcher function. - or/and/not evaluate according to the usual boolean semantics. 
""" + import ast import dataclasses import enum import re -import sys import types from typing import Callable from typing import Iterator @@ -27,11 +27,6 @@ from typing import NoReturn from typing import Optional from typing import Sequence -if sys.version_info >= (3, 8): - astNameConstant = ast.Constant -else: - astNameConstant = ast.NameConstant - __all__ = [ "Expression", @@ -138,7 +133,7 @@ IDENT_PREFIX = "$" def expression(s: Scanner) -> ast.Expression: if s.accept(TokenType.EOF): - ret: ast.expr = astNameConstant(False) + ret: ast.expr = ast.Constant(False) else: ret = expr(s) s.accept(TokenType.EOF, reject=True) diff --git a/contrib/python/pytest/py3/_pytest/mark/structures.py b/contrib/python/pytest/py3/_pytest/mark/structures.py index 32bdc7e38b7..2f2a357f0ba 100644 --- a/contrib/python/pytest/py3/_pytest/mark/structures.py +++ b/contrib/python/pytest/py3/_pytest/mark/structures.py @@ -1,10 +1,10 @@ import collections.abc import dataclasses import inspect -import warnings from typing import Any from typing import Callable from typing import Collection +from typing import final from typing import Iterable from typing import Iterator from typing import List @@ -20,17 +20,19 @@ from typing import Type from typing import TYPE_CHECKING from typing import TypeVar from typing import Union +import warnings from .._code import getfslineno from ..compat import ascii_escaped -from ..compat import final from ..compat import NOTSET from ..compat import NotSetType from _pytest.config import Config from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE from _pytest.outcomes import fail from _pytest.warning_types import PytestUnknownMarkWarning + if TYPE_CHECKING: from ..nodes import Node @@ -110,7 +112,6 @@ class ParameterSet(NamedTuple): Enforce tuple wrapping so single argument tuple values don't get decomposed and break tests. """ - if isinstance(parameterset, cls): return parameterset if force_tuple: @@ -270,8 +271,8 @@ class MarkDecorator: ``MarkDecorators`` are created with ``pytest.mark``:: - mark1 = pytest.mark.NAME # Simple MarkDecorator - mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator and can then be applied as decorators to test functions:: @@ -392,7 +393,7 @@ def get_unpacked_marks( def normalize_mark_list( - mark_list: Iterable[Union[Mark, MarkDecorator]] + mark_list: Iterable[Union[Mark, MarkDecorator]], ) -> Iterable[Mark]: """ Normalize an iterable of Mark or MarkDecorator objects into a list of marks @@ -414,6 +415,12 @@ def store_mark(obj, mark: Mark) -> None: This is used to implement the Mark declarations/decorators correctly. """ assert isinstance(mark, Mark), mark + + from ..fixtures import getfixturemarker + + if getfixturemarker(obj) is not None: + warnings.warn(MARKED_FIXTURE, stacklevel=2) + # Always reassign name to avoid updating pytestmark in a reference that # was only borrowed. obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] @@ -450,11 +457,13 @@ if TYPE_CHECKING: @overload def __call__( self, - condition: Union[str, bool] = ..., + condition: Union[str, bool] = False, *conditions: Union[str, bool], reason: str = ..., run: bool = ..., - raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ..., + raises: Union[ + None, Type[BaseException], Tuple[Type[BaseException], ...] + ] = ..., strict: bool = ..., ) -> MarkDecorator: ... 
@@ -494,9 +503,10 @@ class MarkGenerator: import pytest + @pytest.mark.slowtest def test_function(): - pass + pass applies a 'slowtest' :class:`Mark` on ``test_function``. """ diff --git a/contrib/python/pytest/py3/_pytest/monkeypatch.py b/contrib/python/pytest/py3/_pytest/monkeypatch.py index 9e51ff33538..3e5ec3d21dd 100644 --- a/contrib/python/pytest/py3/_pytest/monkeypatch.py +++ b/contrib/python/pytest/py3/_pytest/monkeypatch.py @@ -1,10 +1,11 @@ """Monkeypatching and mocking functionality.""" + +from contextlib import contextmanager import os import re import sys -import warnings -from contextlib import contextmanager from typing import Any +from typing import final from typing import Generator from typing import List from typing import Mapping @@ -14,11 +15,12 @@ from typing import overload from typing import Tuple from typing import TypeVar from typing import Union +import warnings -from _pytest.compat import final from _pytest.fixtures import fixture from _pytest.warning_types import PytestWarning + RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") @@ -89,9 +91,7 @@ def annotated_getattr(obj: object, name: str, ann: str) -> object: obj = getattr(obj, name) except AttributeError as e: raise AttributeError( - "{!r} object at {} has no attribute {!r}".format( - type(obj).__name__, ann, name - ) + f"{type(obj).__name__!r} object at {ann} has no attribute {name!r}" ) from e return obj @@ -141,7 +141,6 @@ class MonkeyPatch: which undoes any patching done inside the ``with`` block upon exit. Example: - .. code-block:: python import functools @@ -321,10 +320,8 @@ class MonkeyPatch: if not isinstance(value, str): warnings.warn( # type: ignore[unreachable] PytestWarning( - "Value of environment variable {name} type should be str, but got " - "{value!r} (type: {type}); converted to str implicitly".format( - name=name, value=value, type=type(value).__name__ - ) + f"Value of environment variable {name} type should be str, but got " + f"{value!r} (type: {type(value).__name__}); converted to str implicitly" ), stacklevel=2, ) @@ -344,7 +341,6 @@ class MonkeyPatch: def syspath_prepend(self, path) -> None: """Prepend ``path`` to ``sys.path`` list of import locations.""" - if self._savesyspath is None: self._savesyspath = sys.path[:] sys.path.insert(0, str(path)) diff --git a/contrib/python/pytest/py3/_pytest/nodes.py b/contrib/python/pytest/py3/_pytest/nodes.py index a5313cb7656..73efe156e2f 100644 --- a/contrib/python/pytest/py3/_pytest/nodes.py +++ b/contrib/python/pytest/py3/_pytest/nodes.py @@ -1,6 +1,8 @@ -import os -import warnings +import abc +from functools import cached_property from inspect import signature +import os +import pathlib from pathlib import Path from typing import Any from typing import Callable @@ -17,16 +19,19 @@ from typing import Type from typing import TYPE_CHECKING from typing import TypeVar from typing import Union +import warnings + +import pluggy import _pytest._code from _pytest._code import getfslineno from _pytest._code.code import ExceptionInfo from _pytest._code.code import TerminalRepr from _pytest._code.code import Traceback -from _pytest.compat import cached_property from _pytest.compat import LEGACY_PATH from _pytest.config import Config from _pytest.config import ConftestImportFailure +from _pytest.config.compat import _check_path from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH from _pytest.deprecated import NODE_CTOR_FSPATH_ARG from _pytest.mark.structures import Mark @@ -38,10 +43,11 @@ from _pytest.pathlib import commonpath 
from _pytest.stash import Stash from _pytest.warning_types import PytestWarning + if TYPE_CHECKING: # Imported here due to circular import. - from _pytest.main import Session from _pytest._code.code import _TracebackStyle + from _pytest.main import Session SEP = "/" @@ -94,14 +100,6 @@ def iterparentnodeids(nodeid: str) -> Iterator[str]: yield nodeid -def _check_path(path: Path, fspath: LEGACY_PATH) -> None: - if Path(fspath) != path: - raise ValueError( - f"Path({fspath!r}) != {path!r}\n" - "if both path and fspath are given they need to be equal" - ) - - def _imply_path( node_type: Type["Node"], path: Optional[Path], @@ -126,7 +124,21 @@ def _imply_path( _NodeType = TypeVar("_NodeType", bound="Node") -class NodeMeta(type): +class NodeMeta(abc.ABCMeta): + """Metaclass used by :class:`Node` to enforce that direct construction raises + :class:`Failed`. + + This behaviour supports the indirection introduced with :meth:`Node.from_parent`, + the named constructor to be used instead of direct construction. The design + decision to enforce indirection with :class:`NodeMeta` was made as a + temporary aid for refactoring the collection tree, which was diagnosed to + have :class:`Node` objects whose creational patterns were overly entangled. + Once the refactoring is complete, this metaclass can be removed. + + See https://github.com/pytest-dev/pytest/projects/3 for an overview of the + progress on detangling the :class:`Node` classes. + """ + def __call__(self, *k, **kw): msg = ( "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" @@ -156,7 +168,7 @@ class NodeMeta(type): return super().__call__(*k, **known_kw) -class Node(metaclass=NodeMeta): +class Node(abc.ABC, metaclass=NodeMeta): r"""Base class of :class:`Collector` and :class:`Item`, the components of the test collection tree. @@ -167,9 +179,9 @@ class Node(metaclass=NodeMeta): # Implemented in the legacypath plugin. #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage #: for methods not migrated to ``pathlib.Path`` yet, such as - #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer - #: using :attr:`path` instead. + #: :meth:`Item.reportinfo <pytest.Item.reportinfo>`. Will be deprecated in fspath: LEGACY_PATH + #: a future release, prefer using :attr:`path` instead. # Use __slots__ to make attribute access faster. # Note that __dict__ is still available. @@ -219,7 +231,7 @@ class Node(metaclass=NodeMeta): if path is None and fspath is None: path = getattr(parent, "path", None) #: Filesystem path where this node was collected from (can be None). - self.path: Path = _imply_path(type(self), path, fspath=fspath) + self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath) # The explicit annotation is to avoid publicly exposing NodeKeywords. #: Keywords/markers collected from all scopes. @@ -264,7 +276,7 @@ class Node(metaclass=NodeMeta): return cls._create(parent=parent, **kw) @property - def ihook(self): + def ihook(self) -> pluggy.HookRelay: """fspath-sensitive hook proxy used to call pytest hooks.""" return self.session.gethookproxy(self.path) @@ -295,9 +307,7 @@ class Node(metaclass=NodeMeta): # enforce type checks here to avoid getting a generic type error later otherwise. 
if not isinstance(warning, Warning): raise ValueError( - "warning must be an instance of Warning or subclass, got {!r}".format( - warning - ) + f"warning must be an instance of Warning or subclass, got {warning!r}" ) path, lineno = get_fslocation_from_item(self) assert lineno is not None @@ -525,7 +535,7 @@ def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[i return getattr(node, "fspath", "unknown location"), -1 -class Collector(Node): +class Collector(Node, abc.ABC): """Base class of all collectors. Collector create children through `collect()` and thus iteratively build @@ -535,6 +545,7 @@ class Collector(Node): class CollectError(Exception): """An error during collection, contains a custom message.""" + @abc.abstractmethod def collect(self) -> Iterable[Union["Item", "Collector"]]: """Collect children (items and collectors) for this collector.""" raise NotImplementedError("abstract") @@ -579,7 +590,7 @@ def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[ return None -class FSCollector(Collector): +class FSCollector(Collector, abc.ABC): """Base class for filesystem collectors.""" def __init__( @@ -657,14 +668,32 @@ class FSCollector(Collector): return self.session.isinitpath(path) -class File(FSCollector): +class File(FSCollector, abc.ABC): """Base class for collecting tests from a file. :ref:`non-python tests`. """ -class Item(Node): +class Directory(FSCollector, abc.ABC): + """Base class for collecting files from a directory. + + A basic directory collector does the following: goes over the files and + sub-directories in the directory and creates collectors for them by calling + the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`, + after checking that they are not ignored using + :hook:`pytest_ignore_collect`. + + The default directory collectors are :class:`~pytest.Dir` and + :class:`~pytest.Package`. + + .. versionadded:: 8.0 + + :ref:`custom directory collectors`. + """ + + +class Item(Node, abc.ABC): """Base class of all test invocation items. Note that for a single function there might be multiple test invocation items. @@ -730,6 +759,7 @@ class Item(Node): PytestWarning, ) + @abc.abstractmethod def runtest(self) -> None: """Run the test case for this item. diff --git a/contrib/python/pytest/py3/_pytest/nose.py b/contrib/python/pytest/py3/_pytest/nose.py index 273bd045fb6..bf6ebed0273 100644 --- a/contrib/python/pytest/py3/_pytest/nose.py +++ b/contrib/python/pytest/py3/_pytest/nose.py @@ -1,4 +1,5 @@ """Run testsuites written for nose.""" + import warnings from _pytest.config import hookimpl diff --git a/contrib/python/pytest/py3/_pytest/outcomes.py b/contrib/python/pytest/py3/_pytest/outcomes.py index 53c3e1511cb..afdefa2a1b0 100644 --- a/contrib/python/pytest/py3/_pytest/outcomes.py +++ b/contrib/python/pytest/py3/_pytest/outcomes.py @@ -1,29 +1,19 @@ """Exception classes and constants handling test outcomes as well as functions creating them.""" + import sys -import warnings from typing import Any from typing import Callable from typing import cast from typing import NoReturn from typing import Optional +from typing import Protocol from typing import Type from typing import TypeVar +import warnings from _pytest.deprecated import KEYWORD_MSG_ARG -TYPE_CHECKING = False # Avoid circular import through compat. - -if TYPE_CHECKING: - from typing_extensions import Protocol -else: - # typing.Protocol is only available starting from Python 3.8. 
It is also - # available from typing_extensions, but we don't want a runtime dependency - # on that. So use a dummy runtime implementation. - from typing import Generic - - Protocol = Generic - class OutcomeException(BaseException): """OutcomeException and its subclass instances indicate and contain info @@ -244,6 +234,9 @@ def xfail(reason: str = "") -> NoReturn: This function should be called only during testing (setup, call or teardown). + No other code is executed after using ``xfail()`` (it is implemented + internally by raising an exception). + :param reason: The message to show the user as reason for the xfail. @@ -304,8 +297,7 @@ def importorskip( if verattr is None or Version(verattr) < Version(minversion): raise Skipped( - "module %r has __version__ %r, required is: %r" - % (modname, verattr, minversion), + f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", allow_module_level=True, ) return mod diff --git a/contrib/python/pytest/py3/_pytest/pastebin.py b/contrib/python/pytest/py3/_pytest/pastebin.py index 22c7a622373..06b1f9ca99d 100644 --- a/contrib/python/pytest/py3/_pytest/pastebin.py +++ b/contrib/python/pytest/py3/_pytest/pastebin.py @@ -1,15 +1,16 @@ """Submit failure or test session information to a pastebin service.""" -import tempfile + from io import StringIO +import tempfile from typing import IO from typing import Union -import pytest from _pytest.config import Config from _pytest.config import create_terminal_writer from _pytest.config.argparsing import Parser from _pytest.stash import StashKey from _pytest.terminal import TerminalReporter +import pytest pastebinfile_key = StashKey[IO[bytes]]() @@ -73,8 +74,8 @@ def create_new_paste(contents: Union[str, bytes]) -> str: :returns: URL to the pasted contents, or an error message. 
""" import re - from urllib.request import urlopen from urllib.parse import urlencode + from urllib.request import urlopen params = {"code": contents, "lexer": "text", "expiry": "1week"} url = "https://bpa.st" diff --git a/contrib/python/pytest/py3/_pytest/pathlib.py b/contrib/python/pytest/py3/_pytest/pathlib.py index c2f8535f5f5..afe880944f0 100644 --- a/contrib/python/pytest/py3/_pytest/pathlib.py +++ b/contrib/python/pytest/py3/_pytest/pathlib.py @@ -1,20 +1,15 @@ import atexit import contextlib -import fnmatch -import importlib.util -import itertools -import os -import shutil -import sys -import types -import uuid -import warnings from enum import Enum from errno import EBADF from errno import ELOOP from errno import ENOENT from errno import ENOTDIR +import fnmatch from functools import partial +import importlib.util +import itertools +import os from os.path import expanduser from os.path import expandvars from os.path import isabs @@ -22,6 +17,9 @@ from os.path import sep from pathlib import Path from pathlib import PurePath from posixpath import sep as posix_sep +import shutil +import sys +import types from types import ModuleType from typing import Callable from typing import Dict @@ -34,11 +32,14 @@ from typing import Tuple from typing import Type from typing import TypeVar from typing import Union +import uuid +import warnings from _pytest.compat import assert_never from _pytest.outcomes import skip from _pytest.warning_types import PytestWarning + LOCK_TIMEOUT = 60 * 60 * 24 * 3 @@ -101,9 +102,7 @@ def on_rm_rf_error( if func not in (os.open,): warnings.warn( PytestWarning( - "(rm_rf) unknown function {} when removing {}:\n{}: {}".format( - func, path, type(exc), exc - ) + f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}" ) ) return False @@ -242,7 +241,7 @@ def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: else: raise OSError( "could not create numbered dir with prefix " - "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + f"{prefix} in {root} after 10 tries" ) @@ -681,7 +680,7 @@ def resolve_package_path(path: Path) -> Optional[Path]: result = None for parent in itertools.chain((path,), path.parents): if parent.is_dir(): - if not parent.joinpath("__init__.py").is_file(): + if not (parent / "__init__.py").is_file(): break if not parent.name.isidentifier(): break @@ -689,10 +688,14 @@ def resolve_package_path(path: Path) -> Optional[Path]: return result -def scandir(path: Union[str, "os.PathLike[str]"]) -> List["os.DirEntry[str]"]: +def scandir( + path: Union[str, "os.PathLike[str]"], + sort_key: Callable[["os.DirEntry[str]"], object] = lambda entry: entry.name, +) -> List["os.DirEntry[str]"]: """Scan a directory recursively, in breadth-first order. - The returned entries are sorted. + The returned entries are sorted according to the given key. + The default is to sort by name. """ entries = [] with os.scandir(path) as s: @@ -706,7 +709,7 @@ def scandir(path: Union[str, "os.PathLike[str]"]) -> List["os.DirEntry[str]"]: continue raise entries.append(entry) - entries.sort(key=lambda entry: entry.name) + entries.sort(key=sort_key) # type: ignore[arg-type] return entries @@ -776,24 +779,6 @@ def bestrelpath(directory: Path, dest: Path) -> str: ) -# Originates from py. path.local.copy(), with siginficant trims and adjustments. 
-# TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True) -def copytree(source: Path, target: Path) -> None: - """Recursively copy a source directory to target.""" - assert source.is_dir() - for entry in visit(source, recurse=lambda entry: not entry.is_symlink()): - x = Path(entry) - relpath = x.relative_to(source) - newx = target / relpath - newx.parent.mkdir(exist_ok=True) - if x.is_symlink(): - newx.symlink_to(os.readlink(x)) - elif x.is_file(): - shutil.copyfile(x, newx) - elif x.is_dir(): - newx.mkdir(exist_ok=True) - - def safe_exists(p: Path) -> bool: """Like Path.exists(), but account for input arguments that might be too long (#11394).""" try: diff --git a/contrib/python/pytest/py3/_pytest/pytester.py b/contrib/python/pytest/py3/_pytest/pytester.py index 0771065e065..00bd7b02cbd 100644 --- a/contrib/python/pytest/py3/_pytest/pytester.py +++ b/contrib/python/pytest/py3/_pytest/pytester.py @@ -2,28 +2,32 @@ PYTEST_DONT_REWRITE """ + import collections.abc import contextlib +from fnmatch import fnmatch import gc import importlib +from io import StringIO import locale import os +from pathlib import Path import platform import re import shutil import subprocess import sys import traceback -from fnmatch import fnmatch -from io import StringIO -from pathlib import Path from typing import Any from typing import Callable from typing import Dict +from typing import Final +from typing import final from typing import Generator from typing import IO from typing import Iterable from typing import List +from typing import Literal from typing import Optional from typing import overload from typing import Sequence @@ -40,7 +44,6 @@ from iniconfig import SectionWrapper from _pytest import timing from _pytest._code import Source from _pytest.capture import _get_multicapture -from _pytest.compat import final from _pytest.compat import NOTSET from _pytest.compat import NotSetType from _pytest.config import _PluggyPlugin @@ -61,7 +64,6 @@ from _pytest.outcomes import fail from _pytest.outcomes import importorskip from _pytest.outcomes import skip from _pytest.pathlib import bestrelpath -from _pytest.pathlib import copytree from _pytest.pathlib import make_numbered_dir from _pytest.reports import CollectReport from _pytest.reports import TestReport @@ -70,9 +72,6 @@ from _pytest.warning_types import PytestWarning if TYPE_CHECKING: - from typing_extensions import Final - from typing_extensions import Literal - import pexpect @@ -124,13 +123,18 @@ def pytest_configure(config: Config) -> None: class LsofFdLeakChecker: def get_open_files(self) -> List[Tuple[str, str]]: + if sys.version_info >= (3, 11): + # New in Python 3.11, ignores utf-8 mode + encoding = locale.getencoding() + else: + encoding = locale.getpreferredencoding(False) out = subprocess.run( ("lsof", "-Ffn0", "-p", str(os.getpid())), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True, text=True, - encoding=locale.getpreferredencoding(False), + encoding=encoding, ).stdout def isopen(line: str) -> bool: @@ -163,29 +167,31 @@ class LsofFdLeakChecker: else: return True - @hookimpl(hookwrapper=True, tryfirst=True) - def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]: + @hookimpl(wrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]: lines1 = self.get_open_files() - yield - if hasattr(sys, "pypy_version_info"): - gc.collect() - lines2 = self.get_open_files() - - new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} - 
leaked_files = [t for t in lines2 if t[0] in new_fds] - if leaked_files: - error = [ - "***** %s FD leakage detected" % len(leaked_files), - *(str(f) for f in leaked_files), - "*** Before:", - *(str(f) for f in lines1), - "*** After:", - *(str(f) for f in lines2), - "***** %s FD leakage detected" % len(leaked_files), - "*** function %s:%s: %s " % item.location, - "See issue #2366", - ] - item.warn(PytestWarning("\n".join(error))) + try: + return (yield) + finally: + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [ + "***** %s FD leakage detected" % len(leaked_files), + *(str(f) for f in leaked_files), + "*** Before:", + *(str(f) for f in lines1), + "*** After:", + *(str(f) for f in lines2), + "***** %s FD leakage detected" % len(leaked_files), + "*** function {}:{}: {} ".format(*item.location), + "See issue #2366", + ] + item.warn(PytestWarning("\n".join(error))) # used at least by pytest-xdist plugin @@ -371,14 +377,12 @@ class HookRecorder: values.append(rep) if not values: raise ValueError( - "could not find test report matching %r: " - "no test reports at all!" % (inamepart,) + f"could not find test report matching {inamepart!r}: " + "no test reports at all!" ) if len(values) > 1: raise ValueError( - "found 2 or more testreports matching {!r}: {}".format( - inamepart, values - ) + f"found 2 or more testreports matching {inamepart!r}: {values}" ) return values[0] @@ -626,14 +630,6 @@ class RunResult: ) -class CwdSnapshot: - def __init__(self) -> None: - self.__saved = os.getcwd() - - def restore(self) -> None: - os.chdir(self.__saved) - - class SysModulesSnapshot: def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None: self.__preserve = preserve @@ -697,15 +693,14 @@ class Pytester: #: be added to the list. The type of items to add to the list depends on #: the method using them so refer to them for details. self.plugins: List[Union[str, _PluggyPlugin]] = [] - self._cwd_snapshot = CwdSnapshot() self._sys_path_snapshot = SysPathsSnapshot() self._sys_modules_snapshot = self.__take_sys_modules_snapshot() - self.chdir() self._request.addfinalizer(self._finalize) self._method = self._request.config.getoption("--runpytest") self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) self._monkeypatch = mp = monkeypatch + self.chdir() mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) # Ensure no unexpected caching via tox. mp.delenv("TOX_ENV_DIR", raising=False) @@ -736,7 +731,6 @@ class Pytester: """ self._sys_modules_snapshot.restore() self._sys_path_snapshot.restore() - self._cwd_snapshot.restore() def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: # Some zope modules used by twisted-related tests keep internal state @@ -761,7 +755,7 @@ class Pytester: This is done automatically upon instantiation. """ - os.chdir(self.path) + self._monkeypatch.chdir(self.path) def _makefile( self, @@ -813,7 +807,6 @@ class Pytester: The first created file. Examples: - .. code-block:: python pytester.makefile(".txt", "line1", "line2") @@ -830,7 +823,7 @@ class Pytester: return self._makefile(ext, args, kwargs) def makeconftest(self, source: str) -> Path: - """Write a contest.py file. + """Write a conftest.py file. :param source: The contents. :returns: The conftest.py file. @@ -867,7 +860,6 @@ class Pytester: existing files. Examples: - .. 
code-block:: python def test_something(pytester): @@ -887,7 +879,6 @@ class Pytester: existing files. Examples: - .. code-block:: python def test_something(pytester): @@ -973,7 +964,7 @@ class Pytester: example_path = example_dir.joinpath(name) if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): - copytree(example_path, self.path) + shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) return self.path elif example_path.is_file(): result = self.path.joinpath(example_path.name) @@ -1050,7 +1041,7 @@ class Pytester: The calling test instance (class containing the test method) must provide a ``.getrunner()`` method which should return a runner which can run the test protocol for a single item, e.g. - :py:func:`_pytest.runner.runtestprotocol`. + ``_pytest.runner.runtestprotocol``. """ # used from runner functional tests item = self.getitem(source) @@ -1277,9 +1268,7 @@ class Pytester: for item in items: if item.name == funcname: return item - assert 0, "{!r} item not found in module:\n{}\nitems: {}".format( - funcname, source, items - ) + assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" def getitems(self, source: Union[str, "os.PathLike[str]"]) -> List[Item]: """Return all test items collected from the module. @@ -1401,7 +1390,7 @@ class Pytester: :param stdin: Optional standard input. - - If it is :py:attr:`CLOSE_STDIN` (Default), then this method calls + - If it is ``CLOSE_STDIN`` (Default), then this method calls :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and the standard input is closed immediately after the new command is started. @@ -1438,10 +1427,7 @@ class Pytester: def handle_timeout() -> None: __tracebackhide__ = True - timeout_message = ( - "{seconds} second timeout expired running:" - " {command}".format(seconds=timeout, command=cmdargs) - ) + timeout_message = f"{timeout} second timeout expired running: {cmdargs}" popen.kill() popen.wait() diff --git a/contrib/python/pytest/py3/_pytest/pytester_assertions.py b/contrib/python/pytest/py3/_pytest/pytester_assertions.py index 657e4db5fc3..d20c2bb5999 100644 --- a/contrib/python/pytest/py3/_pytest/pytester_assertions.py +++ b/contrib/python/pytest/py3/_pytest/pytester_assertions.py @@ -1,4 +1,5 @@ """Helper plugin for pytester; should not be loaded on its own.""" + # This plugin contains assertions used by pytester. 
pytester cannot # contain them itself, since it is imported by the `pytest` module, # hence cannot be subject to assertion rewriting, which requires a diff --git a/contrib/python/pytest/py3/_pytest/python.py b/contrib/python/pytest/py3/_pytest/python.py index 8b9dd9d1a5b..fa9ac9f0ddc 100644 --- a/contrib/python/pytest/py3/_pytest/python.py +++ b/contrib/python/pytest/py3/_pytest/python.py @@ -1,32 +1,35 @@ """Python test discovery, setup and run of test functions.""" + +import abc +from collections import Counter +from collections import defaultdict import dataclasses import enum import fnmatch +from functools import partial import inspect import itertools import os +from pathlib import Path import sys import types -import warnings -from collections import Counter -from collections import defaultdict -from functools import partial -from pathlib import Path from typing import Any from typing import Callable from typing import Dict +from typing import final from typing import Generator from typing import Iterable from typing import Iterator from typing import List +from typing import Literal from typing import Mapping from typing import Optional from typing import Pattern from typing import Sequence from typing import Set from typing import Tuple -from typing import TYPE_CHECKING from typing import Union +import warnings import _pytest from _pytest import fixtures @@ -39,8 +42,6 @@ from _pytest._code.code import Traceback from _pytest._io import TerminalWriter from _pytest._io.saferepr import saferepr from _pytest.compat import ascii_escaped -from _pytest.compat import assert_never -from _pytest.compat import final from _pytest.compat import get_default_arg_names from _pytest.compat import get_real_func from _pytest.compat import getimfunc @@ -59,7 +60,10 @@ from _pytest.config.argparsing import Parser from _pytest.deprecated import check_ispytest from _pytest.deprecated import INSTANCE_COLLECTOR from _pytest.deprecated import NOSE_SUPPORT_METHOD +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import FixtureRequest from _pytest.fixtures import FuncFixtureInfo +from _pytest.fixtures import get_scope_node from _pytest.main import Session from _pytest.mark import MARK_GEN from _pytest.mark import ParameterSet @@ -73,18 +77,14 @@ from _pytest.pathlib import bestrelpath from _pytest.pathlib import fnmatch_ex from _pytest.pathlib import import_path from _pytest.pathlib import ImportPathMismatchError -from _pytest.pathlib import parts -from _pytest.pathlib import visit +from _pytest.pathlib import scandir +from _pytest.scope import _ScopeName from _pytest.scope import Scope +from _pytest.stash import StashKey from _pytest.warning_types import PytestCollectionWarning from _pytest.warning_types import PytestReturnNotNoneWarning from _pytest.warning_types import PytestUnhandledCoroutineWarning -if TYPE_CHECKING: - from typing_extensions import Literal - - from _pytest.scope import _ScopeName - _PYTEST_DIR = Path(_pytest.__file__).parent @@ -204,11 +204,21 @@ def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: return True +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> Optional[nodes.Collector]: + pkginit = path / "__init__.py" + if pkginit.is_file(): + pkg: Package = Package.from_parent(parent, path=path) + return pkg + return None + + def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]: if file_path.suffix == ".py": if not parent.session.isinitpath(file_path): if not path_matches_patterns( - file_path, 
parent.config.getini("python_files") + ["__init__.py"] + file_path, parent.config.getini("python_files") ): return None ihook = parent.session.gethookproxy(file_path) @@ -225,9 +235,6 @@ def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module": - if module_path.name == "__init__.py": - pkg: Package = Package.from_parent(parent, path=module_path) - return pkg mod: Module = Module.from_parent(parent, path=module_path) return mod @@ -261,8 +268,8 @@ def pytest_pycollect_makeitem( elif getattr(obj, "__test__", True): if is_generator(obj): res: Function = Function.from_parent(collector, name=name) - reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format( - name=name + reason = ( + f"yield tests were removed in pytest 4.0 - {name} will be ignored" ) res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) res.warn(PytestCollectionWarning(reason)) @@ -384,7 +391,7 @@ del _EmptyClass # fmt: on -class PyCollector(PyobjMixin, nodes.Collector): +class PyCollector(PyobjMixin, nodes.Collector, abc.ABC): def funcnamefilter(self, name: str) -> bool: return self._matches_prefix_or_glob_option("python_functions", name) @@ -477,7 +484,9 @@ class PyCollector(PyobjMixin, nodes.Collector): clscol = self.getparent(Class) cls = clscol and clscol.obj or None - definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + definition: FunctionDefinition = FunctionDefinition.from_parent( + self, name=name, callobj=funcobj + ) fixtureinfo = definition._fixtureinfo # pytest_generate_tests impls call metafunc.parametrize() which fills @@ -500,13 +509,11 @@ class PyCollector(PyobjMixin, nodes.Collector): if not metafunc._calls: yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) else: - # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs. - fm = self.session._fixturemanager - fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) - - # Add_funcarg_pseudo_fixture_def may have shadowed some fixtures - # with direct parametrization, so make sure we update what the - # function really needs. + # Direct parametrizations taking place in module/class-specific + # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure + # we update what the function really needs a.k.a its fixture closure. Note that + # direct parametrizations using `@pytest.mark.parametrize` have already been considered + # into making the closure using `ignore_args` arg to `getfixtureclosure`. fixtureinfo.prune_dependency_tree() for callspec in metafunc._calls: @@ -521,11 +528,62 @@ class PyCollector(PyobjMixin, nodes.Collector): ) +def importtestmodule( + path: Path, + config: Config, +): + # We assume we are only called once per module. 
+ importmode = config.getoption("--import-mode") + try: + mod = import_path(path, mode=importmode, root=config.rootpath) + except SyntaxError as e: + raise nodes.Collector.CollectError( + ExceptionInfo.from_current().getrepr(style="short") + ) from e + except ImportPathMismatchError as e: + raise nodes.Collector.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) from e + except ImportError as e: + exc_info = ExceptionInfo.from_current() + if config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + raise nodes.Collector.CollectError( + f"ImportError while importing test module '{path}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + f"{formatted_tb}" + ) from e + except skip.Exception as e: + if e.allow_module_level: + raise + raise nodes.Collector.CollectError( + "Using pytest.skip outside of a test will skip the entire module. " + "If that's your intention, pass `allow_module_level=True`. " + "If you want to skip a specific test or an entire class, " + "use the @pytest.mark.skip or @pytest.mark.skipif decorators." + ) from e + config.pluginmanager.consider_module(mod) + return mod + + class Module(nodes.File, PyCollector): """Collector for test classes and functions in a Python module.""" def _getobj(self): - return self._importtestmodule() + return importtestmodule(self.path, self.config) def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: self._inject_setup_module_fixture() @@ -610,57 +668,21 @@ class Module(nodes.File, PyCollector): self.obj.__pytest_setup_function = xunit_setup_function_fixture - def _importtestmodule(self): - # We assume we are only called once per module. - importmode = self.config.getoption("--import-mode") - try: - mod = import_path(self.path, mode=importmode, root=self.config.rootpath) - except SyntaxError as e: - raise self.CollectError( - ExceptionInfo.from_current().getrepr(style="short") - ) from e - except ImportPathMismatchError as e: - raise self.CollectError( - "import file mismatch:\n" - "imported module %r has this __file__ attribute:\n" - " %s\n" - "which is not the same as the test file we want to collect:\n" - " %s\n" - "HINT: remove __pycache__ / .pyc files and/or use a " - "unique basename for your test file modules" % e.args - ) from e - except ImportError as e: - exc_info = ExceptionInfo.from_current() - if self.config.getoption("verbose") < 2: - exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = ( - exc_info.getrepr(style="short") - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = str(exc_repr) - raise self.CollectError( - "ImportError while importing test module '{path}'.\n" - "Hint: make sure your test modules/packages have valid Python names.\n" - "Traceback:\n" - "{traceback}".format(path=self.path, traceback=formatted_tb) - ) from e - except skip.Exception as e: - if e.allow_module_level: - raise - raise self.CollectError( - "Using pytest.skip outside of a test will skip the entire module. " - "If that's your intention, pass `allow_module_level=True`. 
" - "If you want to skip a specific test or an entire class, " - "use the @pytest.mark.skip or @pytest.mark.skipif decorators." - ) from e - self.config.pluginmanager.consider_module(mod) - return mod - - -class Package(Module): + +class Package(nodes.Directory): """Collector for files and directories in a Python packages -- directories - with an `__init__.py` file.""" + with an `__init__.py` file. + + .. note:: + + Directories without an `__init__.py` file are instead collected by + :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory` + collectors. + + .. versionchanged:: 8.0 + + Now inherits from :class:`~pytest.Directory`. + """ def __init__( self, @@ -673,10 +695,9 @@ class Package(Module): path: Optional[Path] = None, ) -> None: # NOTE: Could be just the following, but kept as-is for compat. - # nodes.FSCollector.__init__(self, fspath, parent=parent) + # super().__init__(self, fspath, parent=parent) session = parent.session - nodes.FSCollector.__init__( - self, + super().__init__( fspath=fspath, path=path, parent=parent, @@ -684,87 +705,53 @@ class Package(Module): session=session, nodeid=nodeid, ) - self.name = self.path.parent.name def setup(self) -> None: + init_mod = importtestmodule(self.path / "__init__.py", self.config) + # Not using fixtures to call setup_module here because autouse fixtures # from packages are not called automatically (#4085). setup_module = _get_first_non_fixture_func( - self.obj, ("setUpModule", "setup_module") + init_mod, ("setUpModule", "setup_module") ) if setup_module is not None: - _call_with_optional_argument(setup_module, self.obj) + _call_with_optional_argument(setup_module, init_mod) teardown_module = _get_first_non_fixture_func( - self.obj, ("tearDownModule", "teardown_module") + init_mod, ("tearDownModule", "teardown_module") ) if teardown_module is not None: - func = partial(_call_with_optional_argument, teardown_module, self.obj) + func = partial(_call_with_optional_argument, teardown_module, init_mod) self.addfinalizer(func) - def _recurse(self, direntry: "os.DirEntry[str]") -> bool: - if direntry.name == "__pycache__": - return False - fspath = Path(direntry.path) - ihook = self.session.gethookproxy(fspath.parent) - if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): - return False - return True - - def _collectfile( - self, fspath: Path, handle_dupes: bool = True - ) -> Sequence[nodes.Collector]: - assert ( - fspath.is_file() - ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( - fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() - ) - ihook = self.session.gethookproxy(fspath) - if not self.session.isinitpath(fspath): - if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): - return () - - if handle_dupes: - keepduplicates = self.config.getoption("keepduplicates") - if not keepduplicates: - duplicate_paths = self.config.pluginmanager._duplicatepaths - if fspath in duplicate_paths: - return () - else: - duplicate_paths.add(fspath) - - return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] - def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: - this_path = self.path.parent - - # Always collect the __init__ first. - if path_matches_patterns(self.path, self.config.getini("python_files")): - yield Module.from_parent(self, path=self.path) - - pkg_prefixes: Set[Path] = set() - for direntry in visit(str(this_path), recurse=self._recurse): - path = Path(direntry.path) + # Always collect __init__.py first. 
+ def sort_key(entry: "os.DirEntry[str]") -> object: + return (entry.name != "__init__.py", entry.name) - # We will visit our own __init__.py file, in which case we skip it. - if direntry.is_file(): - if direntry.name == "__init__.py" and path.parent == this_path: + config = self.config + col: Optional[nodes.Collector] + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path, sort_key): + if direntry.is_dir(): + if direntry.name == "__pycache__": continue - - parts_ = parts(direntry.path) - if any( - str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path - for pkg_prefix in pkg_prefixes - ): - continue - - if direntry.is_file(): - yield from self._collectfile(path) - elif not direntry.is_dir(): - # Broken symlink or invalid/missing file. - continue - elif path.joinpath("__init__.py").is_file(): - pkg_prefixes.add(path) + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols def _call_with_optional_argument(func, arg) -> None: @@ -1003,8 +990,18 @@ class IdMaker: # Suffix non-unique IDs to make them unique. for index, id in enumerate(resolved_ids): if id_counts[id] > 1: - resolved_ids[index] = f"{id}{id_suffixes[id]}" + suffix = "" + if id and id[-1].isdigit(): + suffix = "_" + new_id = f"{id}{suffix}{id_suffixes[id]}" + while new_id in set(resolved_ids): + id_suffixes[id] += 1 + new_id = f"{id}{suffix}{id_suffixes[id]}" + resolved_ids[index] = new_id id_suffixes[id] += 1 + assert len(resolved_ids) == len( + set(resolved_ids) + ), f"Internal error: {resolved_ids=}" return resolved_ids def _limit_ids(self, ids, limit=500): @@ -1133,25 +1130,21 @@ class CallSpec2: and stored in item.callspec. """ - # arg name -> arg value which will be passed to the parametrized test - # function (direct parameterization). - funcargs: Dict[str, object] = dataclasses.field(default_factory=dict) - # arg name -> arg value which will be passed to a fixture of the same name - # (indirect parametrization). + # arg name -> arg value which will be passed to a fixture or pseudo-fixture + # of the same name. (indirect or direct parametrization respectively) params: Dict[str, object] = dataclasses.field(default_factory=dict) # arg name -> arg index. indices: Dict[str, int] = dataclasses.field(default_factory=dict) # Used for sorting parametrized resources. - _arg2scope: Dict[str, Scope] = dataclasses.field(default_factory=dict) + _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict) # Parts which will be added to the item's name in `[..]` separated by "-". - _idlist: List[str] = dataclasses.field(default_factory=list) + _idlist: Sequence[str] = dataclasses.field(default_factory=tuple) # Marks which will be applied to the item. 
marks: List[Mark] = dataclasses.field(default_factory=list) def setmulti( self, *, - valtypes: Mapping[str, "Literal['params', 'funcargs']"], argnames: Iterable[str], valset: Iterable[object], id: str, @@ -1159,24 +1152,16 @@ class CallSpec2: scope: Scope, param_index: int, ) -> "CallSpec2": - funcargs = self.funcargs.copy() params = self.params.copy() indices = self.indices.copy() - arg2scope = self._arg2scope.copy() + arg2scope = dict(self._arg2scope) for arg, val in zip(argnames, valset): - if arg in params or arg in funcargs: + if arg in params: raise ValueError(f"duplicate parametrization of {arg!r}") - valtype_for_arg = valtypes[arg] - if valtype_for_arg == "params": - params[arg] = val - elif valtype_for_arg == "funcargs": - funcargs[arg] = val - else: - assert_never(valtype_for_arg) + params[arg] = val indices[arg] = param_index arg2scope[arg] = scope return CallSpec2( - funcargs=funcargs, params=params, indices=indices, _arg2scope=arg2scope, @@ -1195,6 +1180,14 @@ class CallSpec2: return "-".join(self._idlist) +def get_direct_param_fixture_func(request: FixtureRequest) -> Any: + return request.param + + +# Used for storing pseudo fixturedefs for direct parametrization. +name2pseudofixturedef_key = StashKey[Dict[str, FixtureDef[Any]]]() + + @final class Metafunc: """Objects passed to the :hook:`pytest_generate_tests` hook. @@ -1247,7 +1240,7 @@ class Metafunc: ids: Optional[ Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]] ] = None, - scope: "Optional[_ScopeName]" = None, + scope: Optional[_ScopeName] = None, *, _param_mark: Optional[Mark] = None, ) -> None: @@ -1337,8 +1330,6 @@ class Metafunc: self._validate_if_using_arg_names(argnames, indirect) - arg_values_types = self._resolve_arg_value_types(argnames, indirect) - # Use any already (possibly) generated ids with parametrize Marks. if _param_mark and _param_mark._param_ids_from: generated_ids = _param_mark._param_ids_from._param_ids_generated @@ -1353,6 +1344,60 @@ class Metafunc: if _param_mark and _param_mark._param_ids_from and generated_ids is None: object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + # Add funcargs as fixturedefs to fixtureinfo.arg2fixturedefs by registering + # artificial "pseudo" FixtureDef's so that later at test execution time we can + # rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = self._arg2fixturedefs + node = None + # If we have a scope that is higher than function, we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + if scope_ is not Scope.Function: + collector = self.definition.parent + assert collector is not None + node = get_scope_node(collector, scope_) + if node is None: + # If used class scope and there is no class, use module-level + # collector (for now). + if scope_ is Scope.Class: + assert isinstance(collector, _pytest.python.Module) + node = collector + # If used package scope and there is no package, use session + # (for now). 
+ elif scope_ is Scope.Package: + node = collector.session + else: + assert False, f"Unhandled missing scope: {scope}" + if node is None: + name2pseudofixturedef = None + else: + default: Dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + arg_directness = self._resolve_args_directness(argnames, indirect) + for argname in argnames: + if arg_directness[argname] == "indirect": + continue + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + fixturedef = name2pseudofixturedef[argname] + else: + fixturedef = FixtureDef( + fixturemanager=self.definition.session._fixturemanager, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=scope_, + params=None, + unittest=False, + ids=None, + _ispytest=True, + ) + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + arg2fixturedefs[argname] = [fixturedef] + # Create the new calls: if we are parametrize() multiple times (by applying the decorator # more than once) then we accumulate those calls generating the cartesian product # of all calls. @@ -1362,7 +1407,6 @@ class Metafunc: zip(ids, parametersets) ): newcallspec = callspec.setmulti( - valtypes=arg_values_types, argnames=argnames, valset=param_set.values, id=param_id, @@ -1439,45 +1483,43 @@ class Metafunc: return list(itertools.islice(ids, num_ids)) - def _resolve_arg_value_types( + def _resolve_args_directness( self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]], - ) -> Dict[str, "Literal['params', 'funcargs']"]: - """Resolve if each parametrized argument must be considered a - parameter to a fixture or a "funcarg" to the function, based on the - ``indirect`` parameter of the parametrized() call. + ) -> Dict[str, Literal["indirect", "direct"]]: + """Resolve if each parametrized argument must be considered an indirect + parameter to a fixture of the same name, or a direct parameter to the + parametrized function, based on the ``indirect`` parameter of the + parametrized() call. - :param List[str] argnames: List of argument names passed to ``parametrize()``. - :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. - :rtype: Dict[str, str] - A dict mapping each arg name to either: - * "params" if the argname should be the parameter of a fixture of the same name. - * "funcargs" if the argname should be a parameter to the parametrized test function. + :param argnames: + List of argument names passed to ``parametrize()``. + :param indirect: + Same as the ``indirect`` parameter of ``parametrize()``. + :returns + A dict mapping each arg name to either "indirect" or "direct". 
""" + arg_directness: Dict[str, Literal["indirect", "direct"]] if isinstance(indirect, bool): - valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys( - argnames, "params" if indirect else "funcargs" + arg_directness = dict.fromkeys( + argnames, "indirect" if indirect else "direct" ) elif isinstance(indirect, Sequence): - valtypes = dict.fromkeys(argnames, "funcargs") + arg_directness = dict.fromkeys(argnames, "direct") for arg in indirect: if arg not in argnames: fail( - "In {}: indirect fixture '{}' doesn't exist".format( - self.function.__name__, arg - ), + f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist", pytrace=False, ) - valtypes[arg] = "params" + arg_directness[arg] = "indirect" else: fail( - "In {func}: expected Sequence or boolean for indirect, got {type}".format( - type=type(indirect).__name__, func=self.function.__name__ - ), + f"In {self.function.__name__}: expected Sequence or boolean for indirect, got {type(indirect).__name__}", pytrace=False, ) - return valtypes + return arg_directness def _validate_if_using_arg_names( self, @@ -1496,9 +1538,7 @@ class Metafunc: if arg not in self.fixturenames: if arg in default_arg_names: fail( - "In {}: function already takes an argument '{}' with a default value".format( - func_name, arg - ), + f"In {func_name}: function already takes an argument '{arg}' with a default value", pytrace=False, ) else: @@ -1534,7 +1574,7 @@ def _find_parametrized_scope( if all_arguments_are_fixtures: fixturedefs = arg2fixturedefs or {} used_scopes = [ - fixturedef[0]._scope + fixturedef[-1]._scope for name, fixturedef in fixturedefs.items() if name in argnames ] @@ -1700,7 +1740,7 @@ class Function(PyobjMixin, nodes.Item): :param config: The pytest Config object. :param callspec: - If given, this is function has been parametrized and the callspec contains + If given, this function has been parametrized and the callspec contains meta information about the parametrization. 
:param callobj: If given, the object which will be called when the Function is invoked, @@ -1765,9 +1805,8 @@ class Function(PyobjMixin, nodes.Item): self.keywords.update(keywords) if fixtureinfo is None: - fixtureinfo = self.session._fixturemanager.getfixtureinfo( - self, self.obj, self.cls, funcargs=True - ) + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls) self._fixtureinfo: FuncFixtureInfo = fixtureinfo self.fixturenames = fixtureinfo.names_closure self._initrequest() @@ -1779,7 +1818,7 @@ class Function(PyobjMixin, nodes.Item): def _initrequest(self) -> None: self.funcargs: Dict[str, object] = {} - self._request = fixtures.FixtureRequest(self, _ispytest=True) + self._request = fixtures.TopRequest(self, _ispytest=True) @property def function(self): @@ -1826,9 +1865,11 @@ class Function(PyobjMixin, nodes.Item): if self.config.getoption("tbstyle", "auto") == "auto": if len(ntraceback) > 2: ntraceback = Traceback( - entry - if i == 0 or i == len(ntraceback) - 1 - else entry.with_repr_style("short") + ( + entry + if i == 0 or i == len(ntraceback) - 1 + else entry.with_repr_style("short") + ) for i, entry in enumerate(ntraceback) ) diff --git a/contrib/python/pytest/py3/_pytest/python_api.py b/contrib/python/pytest/py3/_pytest/python_api.py index 183356100c5..2c0ba09bb1d 100644 --- a/contrib/python/pytest/py3/_pytest/python_api.py +++ b/contrib/python/pytest/py3/_pytest/python_api.py @@ -1,17 +1,19 @@ -import math -import pprint from collections.abc import Collection from collections.abc import Sized from decimal import Decimal +import math from numbers import Complex +import pprint from types import TracebackType from typing import Any from typing import Callable from typing import cast from typing import ContextManager +from typing import final from typing import List from typing import Mapping from typing import Optional +from typing import overload from typing import Pattern from typing import Sequence from typing import Tuple @@ -20,24 +22,13 @@ from typing import TYPE_CHECKING from typing import TypeVar from typing import Union -if TYPE_CHECKING: - from numpy import ndarray - - import _pytest._code -from _pytest.compat import final from _pytest.compat import STRING_TYPES -from _pytest.compat import overload from _pytest.outcomes import fail -def _non_numeric_type_error(value, at: Optional[str]) -> TypeError: - at_str = f" at {at}" if at else "" - return TypeError( - "cannot make approximate comparisons to non-numeric values: {!r} {}".format( - value, at_str - ) - ) +if TYPE_CHECKING: + from numpy import ndarray def _compare_approx( @@ -247,9 +238,7 @@ class ApproxMapping(ApproxBase): with numeric values (the keys can be anything).""" def __repr__(self) -> str: - return "approx({!r})".format( - {k: self._approx_scalar(v) for k, v in self.expected.items()} - ) + return f"approx({({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})" def _repr_compare(self, other_side: Mapping[object, float]) -> List[str]: import math @@ -324,9 +313,7 @@ class ApproxSequenceLike(ApproxBase): seq_type = type(self.expected) if seq_type not in (tuple, list): seq_type = list - return "approx({!r})".format( - seq_type(self._approx_scalar(x) for x in self.expected) - ) + return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})" def _repr_compare(self, other_side: Sequence[float]) -> List[str]: import math @@ -706,7 +693,6 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: ``approx`` falls back to 
strict equality for nonnumeric types instead of raising ``TypeError``. """ - # Delegate the comparison to a class that knows how to deal with the type # of the expected value (e.g. int, float, list, dict, numpy.array, etc). # @@ -805,34 +791,35 @@ def raises( # noqa: F811 def raises( # noqa: F811 expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any ) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]: - r"""Assert that a code block/function call raises an exception. + r"""Assert that a code block/function call raises an exception type, or one of its subclasses. - :param typing.Type[E] | typing.Tuple[typing.Type[E], ...] expected_exception: + :param expected_exception: The expected exception type, or a tuple if one of multiple possible - exception types are expected. - :kwparam str | typing.Pattern[str] | None match: + exception types are expected. Note that subclasses of the passed exceptions + will also match. + + :kwparam str | re.Pattern[str] | None match: If specified, a string containing a regular expression, or a regular expression object, that is tested against the string - representation of the exception using :func:`re.search`. + representation of the exception and its :pep:`678` `__notes__` + using :func:`re.search`. To match a literal string that may contain :ref:`special characters <re-syntax>`, the pattern can first be escaped with :func:`re.escape`. - (This is only used when :py:func:`pytest.raises` is used as a context manager, + (This is only used when ``pytest.raises`` is used as a context manager, and passed through to the function otherwise. - When using :py:func:`pytest.raises` as a function, you can use: + When using ``pytest.raises`` as a function, you can use: ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) - .. currentmodule:: _pytest._code - Use ``pytest.raises`` as a context manager, which will capture the exception of the given - type:: + type, or any of its subclasses:: >>> import pytest >>> with pytest.raises(ZeroDivisionError): ... 1/0 - If the code block does not raise the expected exception (``ZeroDivisionError`` in the example + If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example above), or no exception at all, the check will fail instead. You can also use the keyword argument ``match`` to assert that the @@ -844,6 +831,14 @@ def raises( # noqa: F811 >>> with pytest.raises(ValueError, match=r'must be \d+$'): ... raise ValueError("value must be 42") + The ``match`` argument searches the formatted exception string, which includes any + `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``: + + >>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP + ... e = ValueError("value must be 42") + ... e.add_note("had a note added") + ... raise e + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the details of the captured exception:: @@ -852,6 +847,20 @@ def raises( # noqa: F811 >>> assert exc_info.type is ValueError >>> assert exc_info.value.args[0] == "value must be 42" + .. warning:: + + Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this:: + + with pytest.raises(Exception): # Careful, this will catch ANY exception raised. 
+ some_function() + + Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide + real bugs, where the user wrote this expecting a specific exception, but some other exception is being + raised due to a bug introduced during a refactoring. + + Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch + **any** exception raised. + .. note:: When using ``pytest.raises`` as a context manager, it's worthwhile to @@ -864,7 +873,7 @@ def raises( # noqa: F811 >>> with pytest.raises(ValueError) as exc_info: ... if value > 10: ... raise ValueError("value must be <= 10") - ... assert exc_info.type is ValueError # this will not execute + ... assert exc_info.type is ValueError # This will not execute. Instead, the following approach must be taken (note the difference in scope):: @@ -883,6 +892,10 @@ def raises( # noqa: F811 See :ref:`parametrizing_conditional_raising` for an example. + .. seealso:: + + :ref:`assertraises` for more examples and detailed discussion. + **Legacy form** It is possible to specify a callable by passing a to-be-called lambda:: diff --git a/contrib/python/pytest/py3/_pytest/recwarn.py b/contrib/python/pytest/py3/_pytest/recwarn.py index d76ea020f19..634eff2597a 100644 --- a/contrib/python/pytest/py3/_pytest/recwarn.py +++ b/contrib/python/pytest/py3/_pytest/recwarn.py @@ -1,25 +1,27 @@ """Record warnings during test function execution.""" -import re -import warnings + from pprint import pformat +import re from types import TracebackType from typing import Any from typing import Callable +from typing import final from typing import Generator from typing import Iterator from typing import List from typing import Optional +from typing import overload from typing import Pattern from typing import Tuple from typing import Type from typing import TypeVar from typing import Union +import warnings -from _pytest.compat import final -from _pytest.compat import overload from _pytest.deprecated import check_ispytest from _pytest.deprecated import WARNS_NONE_ARG from _pytest.fixtures import fixture +from _pytest.outcomes import Exit from _pytest.outcomes import fail @@ -56,7 +58,7 @@ def deprecated_call( # noqa: F811 def deprecated_call( # noqa: F811 func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any ) -> Union["WarningsRecorder", Any]: - """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``. + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``. This function can be used as a context manager:: @@ -82,7 +84,9 @@ def deprecated_call( # noqa: F811 __tracebackhide__ = True if func is not None: args = (func,) + args - return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + return warns( + (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs + ) @overload @@ -117,10 +121,10 @@ def warns( # noqa: F811 warning of that class or classes. This helper produces a list of :class:`warnings.WarningMessage` objects, one for - each warning raised (regardless of whether it is an ``expected_warning`` or not). + each warning emitted (regardless of whether it is an ``expected_warning`` or not). + Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. 
- This function can be used as a context manager, which will capture all the raised - warnings inside it:: + This function can be used as a context manager:: >>> import pytest >>> with pytest.warns(RuntimeWarning): @@ -135,8 +139,9 @@ def warns( # noqa: F811 >>> with pytest.warns(UserWarning, match=r'must be \d+$'): ... warnings.warn("value must be 42", UserWarning) - >>> with pytest.warns(UserWarning, match=r'must be \d+$'): - ... warnings.warn("this is not here", UserWarning) + >>> with pytest.warns(UserWarning): # catch re-emitted warning + ... with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) Traceback (most recent call last): ... Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... @@ -205,10 +210,21 @@ class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] return len(self._list) def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage": - """Pop the first recorded warning, raise exception if not exists.""" + """Pop the first recorded warning which is an instance of ``cls``, + but not an instance of a child class of any other match. + Raises ``AssertionError`` if there is no match. + """ + best_idx: Optional[int] = None for i, w in enumerate(self._list): - if issubclass(w.category, cls): - return self._list.pop(i) + if w.category == cls: + return self._list.pop(i) # exact match, stop looking + if issubclass(w.category, cls) and ( + best_idx is None + or not issubclass(w.category, self._list[best_idx].category) + ): + best_idx = i + if best_idx is not None: + return self._list.pop(best_idx) __tracebackhide__ = True raise AssertionError(f"{cls!r} not found in warning list") @@ -277,6 +293,12 @@ class WarningsChecker(WarningsRecorder): self.expected_warning = expected_warning_tup self.match_expr = match_expr + def matches(self, warning: warnings.WarningMessage) -> bool: + assert self.expected_warning is not None + return issubclass(warning.category, self.expected_warning) and bool( + self.match_expr is None or re.search(self.match_expr, str(warning.message)) + ) + def __exit__( self, exc_type: Optional[Type[BaseException]], @@ -287,27 +309,45 @@ class WarningsChecker(WarningsRecorder): __tracebackhide__ = True + if self.expected_warning is None: + # nothing to do in this deprecated case, see WARNS_NONE_ARG above + return + + # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within + # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed + # when the warning doesn't happen. Control-flow exceptions should always + # propagate. + if exc_val is not None and ( + not isinstance(exc_val, Exception) + # Exit is an Exception, not a BaseException, for some reason. + or isinstance(exc_val, Exit) + ): + return + def found_str(): return pformat([record.message for record in self], indent=2) - # only check if we're not currently handling an exception - if exc_type is None and exc_val is None and exc_tb is None: - if self.expected_warning is not None: - if not any(issubclass(r.category, self.expected_warning) for r in self): - __tracebackhide__ = True - fail( - f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" - f"The list of emitted warnings is: {found_str()}." + try: + if not any(issubclass(w.category, self.expected_warning) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f" Emitted warnings: {found_str()}." 
+ ) + elif not any(self.matches(w) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n" + f" Regex: {self.match_expr}\n" + f" Emitted warnings: {found_str()}." + ) + finally: + # Whether or not any warnings matched, we want to re-emit all unmatched warnings. + for w in self: + if not self.matches(w): + warnings.warn_explicit( + message=w.message, + category=w.category, + filename=w.filename, + lineno=w.lineno, + module=w.__module__, + source=w.source, ) - elif self.match_expr is not None: - for r in self: - if issubclass(r.category, self.expected_warning): - if re.compile(self.match_expr).search(str(r.message)): - break - else: - fail( - f"""\ -DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted. - Regex: {self.match_expr} - Emitted warnings: {found_str()}""" - ) diff --git a/contrib/python/pytest/py3/_pytest/reports.py b/contrib/python/pytest/py3/_pytest/reports.py index 74e8794b232..4c3ac0391cc 100644 --- a/contrib/python/pytest/py3/_pytest/reports.py +++ b/contrib/python/pytest/py3/_pytest/reports.py @@ -1,13 +1,15 @@ import dataclasses -import os from io import StringIO +import os from pprint import pprint from typing import Any from typing import cast from typing import Dict +from typing import final from typing import Iterable from typing import Iterator from typing import List +from typing import Literal from typing import Mapping from typing import NoReturn from typing import Optional @@ -29,15 +31,13 @@ from _pytest._code.code import ReprLocals from _pytest._code.code import ReprTraceback from _pytest._code.code import TerminalRepr from _pytest._io import TerminalWriter -from _pytest.compat import final from _pytest.config import Config from _pytest.nodes import Collector from _pytest.nodes import Item from _pytest.outcomes import skip -if TYPE_CHECKING: - from typing_extensions import Literal +if TYPE_CHECKING: from _pytest.runner import CallInfo @@ -46,7 +46,7 @@ def getworkerinfoline(node): return node._workerinfocache except AttributeError: d = node.workerinfo - ver = "%s.%s.%s" % d["version_info"][:3] + ver = "{}.{}.{}".format(*d["version_info"][:3]) node._workerinfocache = s = "[{}] {} -- Python {} {}".format( d["id"], d["sysplatform"], ver, d["executable"] ) @@ -64,7 +64,7 @@ class BaseReport: ] sections: List[Tuple[str, str]] nodeid: str - outcome: "Literal['passed', 'failed', 'skipped']" + outcome: Literal["passed", "failed", "skipped"] def __init__(self, **kw: Any) -> None: self.__dict__.update(kw) @@ -249,17 +249,20 @@ class TestReport(BaseReport): """ __test__ = False + # Defined by skipping plugin. + # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish. 
+ wasxfail: str def __init__( self, nodeid: str, location: Tuple[str, Optional[int], str], keywords: Mapping[str, Any], - outcome: "Literal['passed', 'failed', 'skipped']", + outcome: Literal["passed", "failed", "skipped"], longrepr: Union[ None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr ], - when: "Literal['setup', 'call', 'teardown']", + when: Literal["setup", "call", "teardown"], sections: Iterable[Tuple[str, str]] = (), duration: float = 0, start: float = 0, @@ -311,9 +314,7 @@ class TestReport(BaseReport): self.__dict__.update(extra) def __repr__(self) -> str: - return "<{} {!r} when={!r} outcome={!r}>".format( - self.__class__.__name__, self.nodeid, self.when, self.outcome - ) + return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>" @classmethod def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport": @@ -428,9 +429,7 @@ class CollectReport(BaseReport): return (self.fspath, None, self.fspath) def __repr__(self) -> str: - return "<CollectReport {!r} lenresult={} outcome={!r}>".format( - self.nodeid, len(self.result), self.outcome - ) + return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>" class CollectErrorRepr(TerminalRepr): @@ -442,7 +441,7 @@ class CollectErrorRepr(TerminalRepr): def pytest_report_to_serializable( - report: Union[CollectReport, TestReport] + report: Union[CollectReport, TestReport], ) -> Optional[Dict[str, Any]]: if isinstance(report, (TestReport, CollectReport)): data = report._to_json() @@ -474,7 +473,7 @@ def _report_to_json(report: BaseReport) -> Dict[str, Any]: """ def serialize_repr_entry( - entry: Union[ReprEntry, ReprEntryNative] + entry: Union[ReprEntry, ReprEntryNative], ) -> Dict[str, Any]: data = dataclasses.asdict(entry) for key, value in data.items(): diff --git a/contrib/python/pytest/py3/_pytest/runner.py b/contrib/python/pytest/py3/_pytest/runner.py index f861c05a451..5befb0af11c 100644 --- a/contrib/python/pytest/py3/_pytest/runner.py +++ b/contrib/python/pytest/py3/_pytest/runner.py @@ -1,4 +1,5 @@ """Basic collect and runtest protocol implementations.""" + import bdb import dataclasses import os @@ -6,8 +7,10 @@ import sys from typing import Callable from typing import cast from typing import Dict +from typing import final from typing import Generic from typing import List +from typing import Literal from typing import Optional from typing import Tuple from typing import Type @@ -23,10 +26,10 @@ from _pytest import timing from _pytest._code.code import ExceptionChainRepr from _pytest._code.code import ExceptionInfo from _pytest._code.code import TerminalRepr -from _pytest.compat import final from _pytest.config.argparsing import Parser from _pytest.deprecated import check_ispytest from _pytest.nodes import Collector +from _pytest.nodes import Directory from _pytest.nodes import Item from _pytest.nodes import Node from _pytest.outcomes import Exit @@ -34,12 +37,11 @@ from _pytest.outcomes import OutcomeException from _pytest.outcomes import Skipped from _pytest.outcomes import TEST_OUTCOME + if sys.version_info[:2] < (3, 11): from exceptiongroup import BaseExceptionGroup if TYPE_CHECKING: - from typing_extensions import Literal - from _pytest.main import Session from _pytest.terminal import TerminalReporter @@ -93,8 +95,7 @@ def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: if verbose < 2 and rep.duration < durations_min: tr.write_line("") tr.write_line( - "(%s durations < %gs hidden. 
Use -vv to show these durations.)" - % (len(dlist) - i, durations_min) + f"({len(dlist) - i} durations < {durations_min:g}s hidden. Use -vv to show these durations.)" ) break tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") @@ -184,7 +185,7 @@ def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None: def _update_current_test_var( - item: Item, when: Optional["Literal['setup', 'call', 'teardown']"] + item: Item, when: Optional[Literal["setup", "call", "teardown"]] ) -> None: """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. @@ -217,7 +218,7 @@ def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str def call_and_report( - item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds + item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds ) -> TestReport: call = call_runtest_hook(item, when, **kwds) hook = item.ihook @@ -245,7 +246,7 @@ def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> def call_runtest_hook( - item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds + item: Item, when: Literal["setup", "call", "teardown"], **kwds ) -> "CallInfo[None]": if when == "setup": ihook: Callable[..., None] = item.ihook.pytest_runtest_setup @@ -281,7 +282,7 @@ class CallInfo(Generic[TResult]): #: The call duration, in seconds. duration: float #: The context of invocation: "collect", "setup", "call" or "teardown". - when: "Literal['collect', 'setup', 'call', 'teardown']" + when: Literal["collect", "setup", "call", "teardown"] def __init__( self, @@ -290,7 +291,7 @@ class CallInfo(Generic[TResult]): start: float, stop: float, duration: float, - when: "Literal['collect', 'setup', 'call', 'teardown']", + when: Literal["collect", "setup", "call", "teardown"], *, _ispytest: bool = False, ) -> None: @@ -318,8 +319,8 @@ class CallInfo(Generic[TResult]): @classmethod def from_call( cls, - func: "Callable[[], TResult]", - when: "Literal['collect', 'setup', 'call', 'teardown']", + func: Callable[[], TResult], + when: Literal["collect", "setup", "call", "teardown"], reraise: Optional[ Union[Type[BaseException], Tuple[Type[BaseException], ...]] ] = None, @@ -369,7 +370,23 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport: def pytest_make_collect_report(collector: Collector) -> CollectReport: - call = CallInfo.from_call(lambda: list(collector.collect()), "collect") + def collect() -> List[Union[Item, Collector]]: + # Before collecting, if this is a Directory, load the conftests. + # If a conftest import fails to load, it is considered a collection + # error of the Directory collector. This is why it's done inside of the + # CallInfo wrapper. + # + # Note: initial conftests are loaded early, not here. + if isinstance(collector, Directory): + collector.config.pluginmanager._loadconftestmodules( + collector.path, + collector.config.getoption("importmode"), + rootpath=collector.config.rootpath, + ) + + return list(collector.collect()) + + call = CallInfo.from_call(collect, "collect") longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None if not call.excinfo: outcome: Literal["passed", "skipped", "failed"] = "passed" diff --git a/contrib/python/pytest/py3/_pytest/scope.py b/contrib/python/pytest/py3/_pytest/scope.py index 7a746fb9fa9..2c6e23208f2 100644 --- a/contrib/python/pytest/py3/_pytest/scope.py +++ b/contrib/python/pytest/py3/_pytest/scope.py @@ -7,15 +7,14 @@ would cause circular references. 
Also this makes the module light to import, as it should. """ + from enum import Enum from functools import total_ordering +from typing import Literal from typing import Optional -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing_extensions import Literal - _ScopeName = Literal["session", "package", "module", "class", "function"] +_ScopeName = Literal["session", "package", "module", "class", "function"] @total_ordering @@ -33,11 +32,11 @@ class Scope(Enum): """ # Scopes need to be listed from lower to higher. - Function: "_ScopeName" = "function" - Class: "_ScopeName" = "class" - Module: "_ScopeName" = "module" - Package: "_ScopeName" = "package" - Session: "_ScopeName" = "session" + Function: _ScopeName = "function" + Class: _ScopeName = "class" + Module: _ScopeName = "module" + Package: _ScopeName = "package" + Session: _ScopeName = "session" def next_lower(self) -> "Scope": """Return the next lower scope.""" @@ -60,7 +59,7 @@ class Scope(Enum): @classmethod def from_user( - cls, scope_name: "_ScopeName", descr: str, where: Optional[str] = None + cls, scope_name: _ScopeName, descr: str, where: Optional[str] = None ) -> "Scope": """ Given a scope name from the user, return the equivalent Scope enum. Should be used diff --git a/contrib/python/pytest/py3/_pytest/setuponly.py b/contrib/python/pytest/py3/_pytest/setuponly.py index 583590d6b70..6c73860aa42 100644 --- a/contrib/python/pytest/py3/_pytest/setuponly.py +++ b/contrib/python/pytest/py3/_pytest/setuponly.py @@ -2,7 +2,6 @@ from typing import Generator from typing import Optional from typing import Union -import pytest from _pytest._io.saferepr import saferepr from _pytest.config import Config from _pytest.config import ExitCode @@ -10,6 +9,7 @@ from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureDef from _pytest.fixtures import SubRequest from _pytest.scope import Scope +import pytest def pytest_addoption(parser: Parser) -> None: @@ -28,24 +28,26 @@ def pytest_addoption(parser: Parser) -> None: ) -@pytest.hookimpl(hookwrapper=True) +@pytest.hookimpl(wrapper=True) def pytest_fixture_setup( fixturedef: FixtureDef[object], request: SubRequest -) -> Generator[None, None, None]: - yield - if request.config.option.setupshow: - if hasattr(request, "param"): - # Save the fixture parameter so ._show_fixture_action() can - # display it now and during the teardown (in .finish()). - if fixturedef.ids: - if callable(fixturedef.ids): - param = fixturedef.ids(request.param) +) -> Generator[None, object, object]: + try: + return (yield) + finally: + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] else: - param = fixturedef.ids[request.param_index] - else: - param = request.param - fixturedef.cached_param = param # type: ignore[attr-defined] - _show_fixture_action(fixturedef, "SETUP") + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, "SETUP") def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None: @@ -69,7 +71,7 @@ def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: scope_indent = list(reversed(Scope)).index(fixturedef._scope) tw.write(" " * 2 * scope_indent) tw.write( - "{step} {scope} {fixture}".format( + "{step} {scope} {fixture}".format( # noqa: UP032 (Readability) step=msg.ljust(8), # align the output to TEARDOWN scope=fixturedef.scope[0].upper(), fixture=fixturedef.argname, diff --git a/contrib/python/pytest/py3/_pytest/setupplan.py b/contrib/python/pytest/py3/_pytest/setupplan.py index 1a4ebdd99ca..13c0df84ea1 100644 --- a/contrib/python/pytest/py3/_pytest/setupplan.py +++ b/contrib/python/pytest/py3/_pytest/setupplan.py @@ -1,12 +1,12 @@ from typing import Optional from typing import Union -import pytest from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureDef from _pytest.fixtures import SubRequest +import pytest def pytest_addoption(parser: Parser) -> None: diff --git a/contrib/python/pytest/py3/_pytest/skipping.py b/contrib/python/pytest/py3/_pytest/skipping.py index 26ce73758a0..76d523d7060 100644 --- a/contrib/python/pytest/py3/_pytest/skipping.py +++ b/contrib/python/pytest/py3/_pytest/skipping.py @@ -1,10 +1,11 @@ """Support for skip/xfail functions and markers.""" + +from collections.abc import Mapping import dataclasses import os import platform import sys import traceback -from collections.abc import Mapping from typing import Generator from typing import Optional from typing import Tuple @@ -19,6 +20,7 @@ from _pytest.outcomes import fail from _pytest.outcomes import skip from _pytest.outcomes import xfail from _pytest.reports import BaseReport +from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.stash import StashKey @@ -103,9 +105,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, ): if not isinstance(dictionary, Mapping): raise ValueError( - "pytest_markeval_namespace() needs to return a dict, got {!r}".format( - dictionary - ) + f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}" ) globals_.update(dictionary) if hasattr(item, "obj"): @@ -243,7 +243,7 @@ def pytest_runtest_setup(item: Item) -> None: xfail("[NOTRUN] " + xfailed.reason) -@hookimpl(hookwrapper=True) +@hookimpl(wrapper=True) def pytest_runtest_call(item: Item) -> Generator[None, None, None]: xfailed = item.stash.get(xfailed_key, None) if xfailed is None: @@ -252,18 +252,20 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]: if xfailed and not item.config.option.runxfail and not xfailed.run: xfail("[NOTRUN] " + xfailed.reason) - yield - - # The test run may have added an xfail mark dynamically. - xfailed = item.stash.get(xfailed_key, None) - if xfailed is None: - item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + try: + return (yield) + finally: + # The test run may have added an xfail mark dynamically. 
+ xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) -@hookimpl(hookwrapper=True) -def pytest_runtest_makereport(item: Item, call: CallInfo[None]): - outcome = yield - rep = outcome.get_result() +@hookimpl(wrapper=True) +def pytest_runtest_makereport( + item: Item, call: CallInfo[None] +) -> Generator[None, TestReport, TestReport]: + rep = yield xfailed = item.stash.get(xfailed_key, None) if item.config.option.runxfail: pass # don't interfere @@ -286,6 +288,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]): else: rep.outcome = "passed" rep.wasxfail = xfailed.reason + return rep def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: diff --git a/contrib/python/pytest/py3/_pytest/stepwise.py b/contrib/python/pytest/py3/_pytest/stepwise.py index 74ad9dbd4dd..3ebebc288f8 100644 --- a/contrib/python/pytest/py3/_pytest/stepwise.py +++ b/contrib/python/pytest/py3/_pytest/stepwise.py @@ -2,12 +2,13 @@ from typing import List from typing import Optional from typing import TYPE_CHECKING -import pytest from _pytest import nodes from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.main import Session from _pytest.reports import TestReport +import pytest + if TYPE_CHECKING: from _pytest.cacheprovider import Cache diff --git a/contrib/python/pytest/py3/_pytest/terminal.py b/contrib/python/pytest/py3/_pytest/terminal.py index b0cdb58ce00..84ce8d8c6c6 100644 --- a/contrib/python/pytest/py3/_pytest/terminal.py +++ b/contrib/python/pytest/py3/_pytest/terminal.py @@ -2,24 +2,25 @@ This is a good source for looking at the various reporting hooks. """ + import argparse +from collections import Counter import dataclasses import datetime +from functools import partial import inspect +from pathlib import Path import platform import sys import textwrap -import warnings -from collections import Counter -from functools import partial -from pathlib import Path from typing import Any from typing import Callable -from typing import cast from typing import ClassVar from typing import Dict +from typing import final from typing import Generator from typing import List +from typing import Literal from typing import Mapping from typing import NamedTuple from typing import Optional @@ -29,18 +30,18 @@ from typing import TextIO from typing import Tuple from typing import TYPE_CHECKING from typing import Union +import warnings import pluggy -import _pytest._version from _pytest import nodes from _pytest import timing from _pytest._code import ExceptionInfo from _pytest._code.code import ExceptionRepr from _pytest._io import TerminalWriter from _pytest._io.wcwidth import wcswidth +import _pytest._version from _pytest.assertion.util import running_on_ci -from _pytest.compat import final from _pytest.config import _PluggyPlugin from _pytest.config import Config from _pytest.config import ExitCode @@ -54,9 +55,8 @@ from _pytest.reports import BaseReport from _pytest.reports import CollectReport from _pytest.reports import TestReport -if TYPE_CHECKING: - from typing_extensions import Literal +if TYPE_CHECKING: from _pytest.main import Session @@ -367,7 +367,7 @@ class TerminalReporter: self._already_displayed_warnings: Optional[int] = None self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None - def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]": + def _determine_show_progress_info(self) -> Literal["progress", "count", False]: 
"""Return whether we should display progress information based on the current config.""" # do not show progress if we are not capturing output (#3038) unless explicitly # overridden by progress-even-when-capture-no @@ -672,8 +672,8 @@ class TerminalReporter: return f" [ {collected} / {collected} ]" else: if collected: - return " [{:3d}%]".format( - len(self._progress_nodeids_reported) * 100 // collected + return ( + f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]" ) return " [100%]" @@ -758,9 +758,7 @@ class TerminalReporter: if pypy_version_info: verinfo = ".".join(map(str, pypy_version_info[:3])) msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" - msg += ", pytest-{}, pluggy-{}".format( - _pytest._version.version, pluggy.__version__ - ) + msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}" if ( self.verbosity > 0 or self.config.option.debug @@ -849,12 +847,11 @@ class TerminalReporter: for line in doc.splitlines(): self._tw.line("{}{}".format(indent + " ", line)) - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_sessionfinish( self, session: "Session", exitstatus: Union[int, ExitCode] - ): - outcome = yield - outcome.get_result() + ) -> Generator[None, None, None]: + result = yield self._tw.line("") summary_exit_codes = ( ExitCode.OK, @@ -875,17 +872,22 @@ class TerminalReporter: elif session.shouldstop: self.write_sep("!", str(session.shouldstop), red=True) self.summary_stats() + return result - @hookimpl(hookwrapper=True) + @hookimpl(wrapper=True) def pytest_terminal_summary(self) -> Generator[None, None, None]: self.summary_errors() self.summary_failures() + self.summary_xfailures() self.summary_warnings() self.summary_passes() - yield - self.short_test_summary() - # Display any extra warnings from teardown here (if any). - self.summary_warnings() + self.summary_xpasses() + try: + return (yield) + finally: + self.short_test_summary() + # Display any extra warnings from teardown here (if any). 
+ self.summary_warnings() def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) @@ -1009,12 +1011,20 @@ class TerminalReporter: ) def summary_passes(self) -> None: + self.summary_passes_combined("passed", "PASSES", "P") + + def summary_xpasses(self) -> None: + self.summary_passes_combined("xpassed", "XPASSES", "X") + + def summary_passes_combined( + self, which_reports: str, sep_title: str, needed_opt: str + ) -> None: if self.config.option.tbstyle != "no": - if self.hasopt("P"): - reports: List[TestReport] = self.getreports("passed") + if self.hasopt(needed_opt): + reports: List[TestReport] = self.getreports(which_reports) if not reports: return - self.write_sep("=", "PASSES") + self.write_sep("=", sep_title) for rep in reports: if rep.sections: msg = self._getfailureheadline(rep) @@ -1048,21 +1058,30 @@ class TerminalReporter: self._tw.line(content) def summary_failures(self) -> None: + self.summary_failures_combined("failed", "FAILURES") + + def summary_xfailures(self) -> None: + self.summary_failures_combined("xfailed", "XFAILURES", "x") + + def summary_failures_combined( + self, which_reports: str, sep_title: str, needed_opt: Optional[str] = None + ) -> None: if self.config.option.tbstyle != "no": - reports: List[BaseReport] = self.getreports("failed") - if not reports: - return - self.write_sep("=", "FAILURES") - if self.config.option.tbstyle == "line": - for rep in reports: - line = self._getcrashline(rep) - self.write_line(line) - else: - for rep in reports: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg, red=True, bold=True) - self._outrep_summary(rep) - self._handle_teardown_sections(rep.nodeid) + if not needed_opt or self.hasopt(needed_opt): + reports: List[BaseReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + if self.config.option.tbstyle == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) def summary_errors(self) -> None: if self.config.option.tbstyle != "no": @@ -1168,8 +1187,11 @@ class TerminalReporter: verbose_word, **{_color_for_type["warnings"]: True} ) nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" reason = rep.wasxfail - lines.append(f"{markup_word} {nodeid} {reason}") + if reason: + line += " - " + str(reason) + lines.append(line) def show_skipped(lines: List[str]) -> None: skipped: List[CollectReport] = self.stats.get("skipped", []) @@ -1441,7 +1463,7 @@ def _plugin_nameversions(plugininfo) -> List[str]: values: List[str] = [] for plugin, dist in plugininfo: # Gets us name and version! - name = "{dist.project_name}-{dist.version}".format(dist=dist) + name = f"{dist.project_name}-{dist.version}" # Questionable convenience, but it keeps things short. if name.startswith("pytest-"): name = name[7:] @@ -1466,7 +1488,7 @@ def _get_raw_skip_reason(report: TestReport) -> str: The string is just the part given by the user. 
""" if hasattr(report, "wasxfail"): - reason = cast(str, report.wasxfail) + reason = report.wasxfail if reason.startswith("reason: "): reason = reason[len("reason: ") :] return reason diff --git a/contrib/python/pytest/py3/_pytest/threadexception.py b/contrib/python/pytest/py3/_pytest/threadexception.py index 43341e739a0..09faf661b91 100644 --- a/contrib/python/pytest/py3/_pytest/threadexception.py +++ b/contrib/python/pytest/py3/_pytest/threadexception.py @@ -1,12 +1,12 @@ import threading import traceback -import warnings from types import TracebackType from typing import Any from typing import Callable from typing import Generator from typing import Optional from typing import Type +import warnings import pytest @@ -59,30 +59,34 @@ class catch_threading_exception: def thread_exception_runtest_hook() -> Generator[None, None, None]: with catch_threading_exception() as cm: - yield - if cm.args: - thread_name = "<unknown>" if cm.args.thread is None else cm.args.thread.name - msg = f"Exception in thread {thread_name}\n\n" - msg += "".join( - traceback.format_exception( - cm.args.exc_type, - cm.args.exc_value, - cm.args.exc_traceback, + try: + yield + finally: + if cm.args: + thread_name = ( + "<unknown>" if cm.args.thread is None else cm.args.thread.name + ) + msg = f"Exception in thread {thread_name}\n\n" + msg += "".join( + traceback.format_exception( + cm.args.exc_type, + cm.args.exc_value, + cm.args.exc_traceback, + ) ) - ) - warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) -@pytest.hookimpl(hookwrapper=True, trylast=True) +@pytest.hookimpl(wrapper=True, trylast=True) def pytest_runtest_setup() -> Generator[None, None, None]: yield from thread_exception_runtest_hook() -@pytest.hookimpl(hookwrapper=True, tryfirst=True) +@pytest.hookimpl(wrapper=True, tryfirst=True) def pytest_runtest_call() -> Generator[None, None, None]: yield from thread_exception_runtest_hook() -@pytest.hookimpl(hookwrapper=True, tryfirst=True) +@pytest.hookimpl(wrapper=True, tryfirst=True) def pytest_runtest_teardown() -> Generator[None, None, None]: yield from thread_exception_runtest_hook() diff --git a/contrib/python/pytest/py3/_pytest/timing.py b/contrib/python/pytest/py3/_pytest/timing.py index 925163a5858..0541dc8e0a1 100644 --- a/contrib/python/pytest/py3/_pytest/timing.py +++ b/contrib/python/pytest/py3/_pytest/timing.py @@ -5,8 +5,10 @@ pytest runtime information (issue #185). Fixture "mock_timing" also interacts with this module for pytest's own tests. 
""" + from time import perf_counter from time import sleep from time import time + __all__ = ["perf_counter", "sleep", "time"] diff --git a/contrib/python/pytest/py3/_pytest/tmpdir.py b/contrib/python/pytest/py3/_pytest/tmpdir.py index 3cc2bace55b..986824ccb72 100644 --- a/contrib/python/pytest/py3/_pytest/tmpdir.py +++ b/contrib/python/pytest/py3/_pytest/tmpdir.py @@ -1,44 +1,40 @@ """Support for providing temporary directories to test functions.""" + import dataclasses import os -import re -import tempfile from pathlib import Path +import re from shutil import rmtree +import tempfile from typing import Any from typing import Dict +from typing import final from typing import Generator +from typing import Literal from typing import Optional -from typing import TYPE_CHECKING from typing import Union -from _pytest.nodes import Item -from _pytest.reports import CollectReport -from _pytest.stash import StashKey - -if TYPE_CHECKING: - from typing_extensions import Literal - - RetentionType = Literal["all", "failed", "none"] - - -from _pytest.config.argparsing import Parser - +from .pathlib import cleanup_dead_symlinks from .pathlib import LOCK_TIMEOUT from .pathlib import make_numbered_dir from .pathlib import make_numbered_dir_with_cleanup from .pathlib import rm_rf -from .pathlib import cleanup_dead_symlinks -from _pytest.compat import final, get_user_id +from _pytest.compat import get_user_id from _pytest.config import Config from _pytest.config import ExitCode from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser from _pytest.deprecated import check_ispytest from _pytest.fixtures import fixture from _pytest.fixtures import FixtureRequest from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.stash import StashKey + tmppath_result_key = StashKey[Dict[str, bool]]() +RetentionType = Literal["all", "failed", "none"] @final @@ -54,13 +50,13 @@ class TempPathFactory: _trace: Any _basetemp: Optional[Path] _retention_count: int - _retention_policy: "RetentionType" + _retention_policy: RetentionType def __init__( self, given_basetemp: Optional[Path], retention_count: int, - retention_policy: "RetentionType", + retention_policy: RetentionType, trace, basetemp: Optional[Path] = None, *, @@ -209,7 +205,7 @@ def get_user() -> Optional[str]: import getpass return getpass.getuser() - except (ImportError, KeyError): + except (ImportError, OSError, KeyError): return None @@ -273,7 +269,6 @@ def tmp_path( The returned object is a :class:`pathlib.Path` object. 
""" - path = _mk_tmp(request, tmp_path_factory) yield path @@ -315,10 +310,12 @@ def pytest_sessionfinish(session, exitstatus: Union[int, ExitCode]): cleanup_dead_symlinks(basetemp) -@hookimpl(tryfirst=True, hookwrapper=True) -def pytest_runtest_makereport(item: Item, call): - outcome = yield - result: CollectReport = outcome.get_result() - +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport( + item: Item, call +) -> Generator[None, TestReport, TestReport]: + rep = yield + assert rep.when is not None empty: Dict[str, bool] = {} - item.stash.setdefault(tmppath_result_key, empty)[result.when] = result.passed + item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed + return rep diff --git a/contrib/python/pytest/py3/_pytest/unittest.py b/contrib/python/pytest/py3/_pytest/unittest.py index d42a12a3a9e..de68f396537 100644 --- a/contrib/python/pytest/py3/_pytest/unittest.py +++ b/contrib/python/pytest/py3/_pytest/unittest.py @@ -1,4 +1,5 @@ """Discover and run std-library "unittest" style tests.""" + import sys import traceback import types @@ -14,7 +15,6 @@ from typing import TYPE_CHECKING from typing import Union import _pytest._code -import pytest from _pytest.compat import getimfunc from _pytest.compat import is_async_function from _pytest.config import hookimpl @@ -30,9 +30,12 @@ from _pytest.python import Function from _pytest.python import Module from _pytest.runner import CallInfo from _pytest.scope import Scope +import pytest + if TYPE_CHECKING: import unittest + import twisted.trial.unittest _SysExcInfoType = Union[ @@ -200,10 +203,10 @@ class TestCaseFunction(Function): assert self.parent is not None self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined] self._obj = getattr(self._testcase, self.name) - if hasattr(self, "_request"): - self._request._fillfixtures() + super().setup() def teardown(self) -> None: + super().teardown() if self._explicit_tearDown is not None: self._explicit_tearDown() self._explicit_tearDown = None @@ -217,7 +220,9 @@ class TestCaseFunction(Function): # Unwrap potential exception info (see twisted trial support below). rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) try: - excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo) # type: ignore[arg-type] + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( + rawexcinfo # type: ignore[arg-type] + ) # Invoke the attributes to trigger storing the traceback # trial causes some issue there. excinfo.value @@ -362,9 +367,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: # handled internally, and doesn't reach here. unittest = sys.modules.get("unittest") if ( - unittest - and call.excinfo - and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] + unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] ): excinfo = call.excinfo call2 = CallInfo[None].from_call( @@ -376,8 +379,8 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: # Twisted trial support. 
-@hookimpl(hookwrapper=True) -def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: +@hookimpl(wrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: ut: Any = sys.modules["twisted.python.failure"] Failure__init__ = ut.Failure.__init__ @@ -400,17 +403,20 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: Failure__init__(self, exc_value, exc_type, exc_tb) ut.Failure.__init__ = excstore - yield - ut.Failure.__init__ = Failure__init__ + try: + res = yield + finally: + ut.Failure.__init__ = Failure__init__ else: - yield + res = yield + return res def check_testcase_implements_trial_reporter(done: List[int] = []) -> None: if done: return - from zope.interface import classImplements from twisted.trial.itrial import IReporter + from zope.interface import classImplements classImplements(TestCaseFunction, IReporter) done.append(1) diff --git a/contrib/python/pytest/py3/_pytest/unraisableexception.py b/contrib/python/pytest/py3/_pytest/unraisableexception.py index fcb5d8237c1..f649267abf1 100644 --- a/contrib/python/pytest/py3/_pytest/unraisableexception.py +++ b/contrib/python/pytest/py3/_pytest/unraisableexception.py @@ -1,12 +1,12 @@ import sys import traceback -import warnings from types import TracebackType from typing import Any from typing import Callable from typing import Generator from typing import Optional from typing import Type +import warnings import pytest @@ -61,33 +61,35 @@ class catch_unraisable_exception: def unraisable_exception_runtest_hook() -> Generator[None, None, None]: with catch_unraisable_exception() as cm: - yield - if cm.unraisable: - if cm.unraisable.err_msg is not None: - err_msg = cm.unraisable.err_msg - else: - err_msg = "Exception ignored in" - msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" - msg += "".join( - traceback.format_exception( - cm.unraisable.exc_type, - cm.unraisable.exc_value, - cm.unraisable.exc_traceback, + try: + yield + finally: + if cm.unraisable: + if cm.unraisable.err_msg is not None: + err_msg = cm.unraisable.err_msg + else: + err_msg = "Exception ignored in" + msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" + msg += "".join( + traceback.format_exception( + cm.unraisable.exc_type, + cm.unraisable.exc_value, + cm.unraisable.exc_traceback, + ) ) - ) - warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) -@pytest.hookimpl(hookwrapper=True, tryfirst=True) +@pytest.hookimpl(wrapper=True, tryfirst=True) def pytest_runtest_setup() -> Generator[None, None, None]: yield from unraisable_exception_runtest_hook() -@pytest.hookimpl(hookwrapper=True, tryfirst=True) +@pytest.hookimpl(wrapper=True, tryfirst=True) def pytest_runtest_call() -> Generator[None, None, None]: yield from unraisable_exception_runtest_hook() -@pytest.hookimpl(hookwrapper=True, tryfirst=True) +@pytest.hookimpl(wrapper=True, tryfirst=True) def pytest_runtest_teardown() -> Generator[None, None, None]: yield from unraisable_exception_runtest_hook() diff --git a/contrib/python/pytest/py3/_pytest/warning_types.py b/contrib/python/pytest/py3/_pytest/warning_types.py index bd5f4187343..ae00ccfa613 100644 --- a/contrib/python/pytest/py3/_pytest/warning_types.py +++ b/contrib/python/pytest/py3/_pytest/warning_types.py @@ -1,13 +1,12 @@ import dataclasses import inspect -import warnings from types import FunctionType from typing import Any +from typing import final 
from typing import Generic from typing import Type from typing import TypeVar - -from _pytest.compat import final +import warnings class PytestWarning(UserWarning): @@ -56,7 +55,13 @@ class PytestRemovedIn8Warning(PytestDeprecationWarning): __module__ = "pytest" -class PytestReturnNotNoneWarning(PytestRemovedIn8Warning): +class PytestRemovedIn9Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 9.""" + + __module__ = "pytest" + + +class PytestReturnNotNoneWarning(PytestWarning): """Warning emitted when a test function is returning value other than None.""" __module__ = "pytest" @@ -74,11 +79,7 @@ class PytestExperimentalApiWarning(PytestWarning, FutureWarning): @classmethod def simple(cls, apiname: str) -> "PytestExperimentalApiWarning": - return cls( - "{apiname} is an experimental api that may change over time".format( - apiname=apiname - ) - ) + return cls(f"{apiname} is an experimental api that may change over time") @final diff --git a/contrib/python/pytest/py3/_pytest/warnings.py b/contrib/python/pytest/py3/_pytest/warnings.py index 4aaa9445293..f45163fa2e6 100644 --- a/contrib/python/pytest/py3/_pytest/warnings.py +++ b/contrib/python/pytest/py3/_pytest/warnings.py @@ -1,20 +1,17 @@ -import sys -import warnings from contextlib import contextmanager +import sys from typing import Generator +from typing import Literal from typing import Optional -from typing import TYPE_CHECKING +import warnings -import pytest from _pytest.config import apply_warning_filters from _pytest.config import Config from _pytest.config import parse_warning_filter from _pytest.main import Session from _pytest.nodes import Item from _pytest.terminal import TerminalReporter - -if TYPE_CHECKING: - from typing_extensions import Literal +import pytest def pytest_configure(config: Config) -> None: @@ -29,7 +26,7 @@ def pytest_configure(config: Config) -> None: def catch_warnings_for_item( config: Config, ihook, - when: "Literal['config', 'collect', 'runtest']", + when: Literal["config", "collect", "runtest"], item: Optional[Item], ) -> Generator[None, None, None]: """Context manager that catches warnings generated in the contained execution block. 
@@ -49,6 +46,8 @@ def catch_warnings_for_item(
 warnings.filterwarnings("always", category=DeprecationWarning)
 warnings.filterwarnings("always", category=PendingDeprecationWarning)
+ warnings.filterwarnings("error", category=pytest.PytestRemovedIn8Warning)
+
 apply_warning_filters(config_filters, cmdline_filters)
 # apply filters from "filterwarnings" marks
@@ -58,17 +57,18 @@
 for arg in mark.args:
 warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
- yield
-
- for warning_message in log:
- ihook.pytest_warning_recorded.call_historic(
- kwargs=dict(
- warning_message=warning_message,
- nodeid=nodeid,
- when=when,
- location=None,
+ try:
+ yield
+ finally:
+ for warning_message in log:
+ ihook.pytest_warning_recorded.call_historic(
+ kwargs=dict(
+ warning_message=warning_message,
+ nodeid=nodeid,
+ when=when,
+ location=None,
+ )
 )
- )
 def warning_record_to_str(warning_message: warnings.WarningMessage) -> str:
@@ -101,24 +101,24 @@
 return msg
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]:
 with catch_warnings_for_item(
 config=item.config, ihook=item.ihook, when="runtest", item=item
 ):
- yield
+ return (yield)
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_collection(session: Session) -> Generator[None, None, None]:
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_collection(session: Session) -> Generator[None, object, object]:
 config = session.config
 with catch_warnings_for_item(
 config=config, ihook=config.hook, when="collect", item=None
 ):
- yield
+ return (yield)
-@pytest.hookimpl(hookwrapper=True)
+@pytest.hookimpl(wrapper=True)
 def pytest_terminal_summary(
 terminalreporter: TerminalReporter,
 ) -> Generator[None, None, None]:
@@ -126,23 +126,23 @@
 with catch_warnings_for_item(
 config=config, ihook=config.hook, when="config", item=None
 ):
- yield
+ return (yield)
-@pytest.hookimpl(hookwrapper=True)
+@pytest.hookimpl(wrapper=True)
 def pytest_sessionfinish(session: Session) -> Generator[None, None, None]:
 config = session.config
 with catch_warnings_for_item(
 config=config, ihook=config.hook, when="config", item=None
 ):
- yield
+ return (yield)
-@pytest.hookimpl(hookwrapper=True)
+@pytest.hookimpl(wrapper=True)
 def pytest_load_initial_conftests(
 early_config: "Config",
 ) -> Generator[None, None, None]:
 with catch_warnings_for_item(
 config=early_config, ihook=early_config.hook, when="config", item=None
 ):
- yield
+ return (yield)
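Note on the recurring pattern in the hunks above: most of this commit migrates old-style @hookimpl(hookwrapper=True) hooks, which receive an outcome object and call outcome.get_result(), to new-style @hookimpl(wrapper=True) hooks, which receive the result directly from yield and must return it, with cleanup moved into try/finally. The sketch below shows what that style could look like in a third-party conftest.py; it is a hypothetical illustration, not code from this commit, and assumes a pytest/pluggy version where wrapper=True is available (pytest 7.4 or later). The section label added to the report is made up for the example.

# conftest.py -- hypothetical sketch of a new-style wrapper hook
from typing import Generator

import pytest
from _pytest.reports import TestReport


@pytest.hookimpl(wrapper=True)
def pytest_runtest_makereport(item, call) -> Generator[None, TestReport, TestReport]:
    # With wrapper=True, `yield` hands back the TestReport produced by the
    # inner hook implementations directly (no outcome.get_result()).
    rep = yield
    if rep.when == "call" and rep.failed:
        # Post-process the report, similar to how the skipping and tmpdir
        # plugins do in the hunks above; "wrapper note" is an invented section.
        rep.sections.append(("wrapper note", f"post-processed {item.nodeid}"))
    # The wrapper must return a report so plugins further out still receive one.
    return rep

The try/finally variants in threadexception.py, unraisableexception.py and warnings.py above follow the same shape when the post-processing has to run even if an inner hook raises.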