author    monster <monster@ydb.tech>  2022-07-07 14:41:37 +0300
committer monster <monster@ydb.tech>  2022-07-07 14:41:37 +0300
commit    06e5c21a835c0e923506c4ff27929f34e00761c2 (patch)
tree      75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/python/pytest/py3/_pytest
parent    03f024c4412e3aa613bb543cf1660176320ba8f4 (diff)
download  ydb-06e5c21a835c0e923506c4ff27929f34e00761c2.tar.gz
fix ya.make
Diffstat (limited to 'contrib/python/pytest/py3/_pytest')
-rw-r--r--  contrib/python/pytest/py3/_pytest/__init__.py  9
-rw-r--r--  contrib/python/pytest/py3/_pytest/_argcomplete.py  116
-rw-r--r--  contrib/python/pytest/py3/_pytest/_code/__init__.py  22
-rw-r--r--  contrib/python/pytest/py3/_pytest/_code/code.py  1274
-rw-r--r--  contrib/python/pytest/py3/_pytest/_code/source.py  217
-rw-r--r--  contrib/python/pytest/py3/_pytest/_io/__init__.py  8
-rw-r--r--  contrib/python/pytest/py3/_pytest/_io/saferepr.py  153
-rw-r--r--  contrib/python/pytest/py3/_pytest/_io/terminalwriter.py  233
-rw-r--r--  contrib/python/pytest/py3/_pytest/_io/wcwidth.py  55
-rw-r--r--  contrib/python/pytest/py3/_pytest/_version.py  5
-rw-r--r--  contrib/python/pytest/py3/_pytest/assertion/__init__.py  181
-rw-r--r--  contrib/python/pytest/py3/_pytest/assertion/rewrite.py  1129
-rw-r--r--  contrib/python/pytest/py3/_pytest/assertion/truncate.py  94
-rw-r--r--  contrib/python/pytest/py3/_pytest/assertion/util.py  509
-rw-r--r--  contrib/python/pytest/py3/_pytest/cacheprovider.py  580
-rw-r--r--  contrib/python/pytest/py3/_pytest/capture.py  942
-rw-r--r--  contrib/python/pytest/py3/_pytest/compat.py  405
-rw-r--r--  contrib/python/pytest/py3/_pytest/config/__init__.py  1693
-rw-r--r--  contrib/python/pytest/py3/_pytest/config/argparsing.py  535
-rw-r--r--  contrib/python/pytest/py3/_pytest/config/compat.py  71
-rw-r--r--  contrib/python/pytest/py3/_pytest/config/exceptions.py  11
-rw-r--r--  contrib/python/pytest/py3/_pytest/config/findpaths.py  213
-rw-r--r--  contrib/python/pytest/py3/_pytest/debugging.py  427
-rw-r--r--  contrib/python/pytest/py3/_pytest/deprecated.py  123
-rw-r--r--  contrib/python/pytest/py3/_pytest/doctest.py  734
-rw-r--r--  contrib/python/pytest/py3/_pytest/faulthandler.py  97
-rw-r--r--  contrib/python/pytest/py3/_pytest/fixtures.py  1655
-rw-r--r--  contrib/python/pytest/py3/_pytest/freeze_support.py  44
-rw-r--r--  contrib/python/pytest/py3/_pytest/helpconfig.py  264
-rw-r--r--  contrib/python/pytest/py3/_pytest/hookspec.py  892
-rw-r--r--  contrib/python/pytest/py3/_pytest/junitxml.py  696
-rw-r--r--  contrib/python/pytest/py3/_pytest/legacypath.py  467
-rw-r--r--  contrib/python/pytest/py3/_pytest/logging.py  826
-rw-r--r--  contrib/python/pytest/py3/_pytest/main.py  895
-rw-r--r--  contrib/python/pytest/py3/_pytest/mark/__init__.py  266
-rw-r--r--  contrib/python/pytest/py3/_pytest/mark/expression.py  225
-rw-r--r--  contrib/python/pytest/py3/_pytest/mark/structures.py  593
-rw-r--r--  contrib/python/pytest/py3/_pytest/monkeypatch.py  383
-rw-r--r--  contrib/python/pytest/py3/_pytest/nodes.py  762
-rw-r--r--  contrib/python/pytest/py3/_pytest/nose.py  42
-rw-r--r--  contrib/python/pytest/py3/_pytest/outcomes.py  307
-rw-r--r--  contrib/python/pytest/py3/_pytest/pastebin.py  110
-rw-r--r--  contrib/python/pytest/py3/_pytest/pathlib.py  735
-rw-r--r--  contrib/python/pytest/py3/_pytest/py.typed  0
-rw-r--r--  contrib/python/pytest/py3/_pytest/pytester.py  1750
-rw-r--r--  contrib/python/pytest/py3/_pytest/pytester_assertions.py  75
-rw-r--r--  contrib/python/pytest/py3/_pytest/python.py  1820
-rw-r--r--  contrib/python/pytest/py3/_pytest/python_api.py  975
-rw-r--r--  contrib/python/pytest/py3/_pytest/python_path.py  24
-rw-r--r--  contrib/python/pytest/py3/_pytest/recwarn.py  296
-rw-r--r--  contrib/python/pytest/py3/_pytest/reports.py  599
-rw-r--r--  contrib/python/pytest/py3/_pytest/runner.py  541
-rw-r--r--  contrib/python/pytest/py3/_pytest/scope.py  91
-rw-r--r--  contrib/python/pytest/py3/_pytest/setuponly.py  97
-rw-r--r--  contrib/python/pytest/py3/_pytest/setupplan.py  40
-rw-r--r--  contrib/python/pytest/py3/_pytest/skipping.py  296
-rw-r--r--  contrib/python/pytest/py3/_pytest/stash.py  112
-rw-r--r--  contrib/python/pytest/py3/_pytest/stepwise.py  122
-rw-r--r--  contrib/python/pytest/py3/_pytest/terminal.py  1400
-rw-r--r--  contrib/python/pytest/py3/_pytest/threadexception.py  88
-rw-r--r--  contrib/python/pytest/py3/_pytest/timing.py  12
-rw-r--r--  contrib/python/pytest/py3/_pytest/tmpdir.py  212
-rw-r--r--  contrib/python/pytest/py3/_pytest/unittest.py  414
-rw-r--r--  contrib/python/pytest/py3/_pytest/unraisableexception.py  93
-rw-r--r--  contrib/python/pytest/py3/_pytest/warning_types.py  138
-rw-r--r--  contrib/python/pytest/py3/_pytest/warnings.py  148
66 files changed, 0 insertions, 28341 deletions
diff --git a/contrib/python/pytest/py3/_pytest/__init__.py b/contrib/python/pytest/py3/_pytest/__init__.py
deleted file mode 100644
index 8a406c5c75..0000000000
--- a/contrib/python/pytest/py3/_pytest/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-__all__ = ["__version__", "version_tuple"]
-
-try:
- from ._version import version as __version__, version_tuple
-except ImportError: # pragma: no cover
- # broken installation, we don't even try
- # unknown only works because we do poor mans version compare
- __version__ = "unknown"
- version_tuple = (0, 0, "unknown") # type:ignore[assignment]
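The fallback above keeps __version__ and version_tuple defined even on a broken installation. A minimal sketch of consuming them, assuming the vendored package is importable; the concrete version numbers are illustrative:

    import _pytest

    print(_pytest.__version__)     # e.g. "7.x.y", or "unknown" on a broken installation
    print(_pytest.version_tuple)   # e.g. (7, x, y), or (0, 0, "unknown")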
diff --git a/contrib/python/pytest/py3/_pytest/_argcomplete.py b/contrib/python/pytest/py3/_pytest/_argcomplete.py
deleted file mode 100644
index 120f09ff68..0000000000
--- a/contrib/python/pytest/py3/_pytest/_argcomplete.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""Allow bash-completion for argparse with argcomplete if installed.
-
-Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
-to find the magic string, so _ARGCOMPLETE env. var is never set, and
-this does not need special code).
-
-Function try_argcomplete(parser) should be called directly before
-the call to ArgumentParser.parse_args().
-
-The filescompleter is what you normally would use on the positional
-arguments specification, in order to get "dirname/" after "dirn<TAB>"
-instead of the default "dirname ":
-
- optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter
-
-Other, application specific, completers should go in the file
-doing the add_argument calls as they need to be specified as .completer
-attributes as well. (If argcomplete is not installed, the function the
-attribute points to will not be used).
-
-SPEEDUP
-=======
-
-The generic argcomplete script for bash-completion
-(/etc/bash_completion.d/python-argcomplete.sh)
-uses a python program to determine startup script generated by pip.
-You can speed up completion somewhat by changing this script to include
- # PYTHON_ARGCOMPLETE_OK
-so the python-argcomplete-check-easy-install-script does not
-need to be called to find the entry point of the code and see if that is
-marked with PYTHON_ARGCOMPLETE_OK.
-
-INSTALL/DEBUGGING
-=================
-
-To include this support in another application that has setup.py generated
-scripts:
-
-- Add the line:
- # PYTHON_ARGCOMPLETE_OK
- near the top of the main python entry point.
-
-- Include in the file calling parse_args():
- from _argcomplete import try_argcomplete, filescompleter
- Call try_argcomplete just before parse_args(), and optionally add
- filescompleter to the positional arguments' add_argument().
-
-If things do not work right away:
-
-- Switch on argcomplete debugging with (also helpful when doing custom
- completers):
- export _ARC_DEBUG=1
-
-- Run:
- python-argcomplete-check-easy-install-script $(which appname)
- echo $?
- will echo 0 if the magic line has been found, 1 if not.
-
-- Sometimes it helps to find early on errors using:
- _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
- which should throw a KeyError: 'COMPLINE' (which is properly set by the
- global argcomplete script).
-"""
-import argparse
-import os
-import sys
-from glob import glob
-from typing import Any
-from typing import List
-from typing import Optional
-
-
-class FastFilesCompleter:
- """Fast file completer class."""
-
- def __init__(self, directories: bool = True) -> None:
- self.directories = directories
-
- def __call__(self, prefix: str, **kwargs: Any) -> List[str]:
- # Only called on non option completions.
- if os.path.sep in prefix[1:]:
- prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
- else:
- prefix_dir = 0
- completion = []
- globbed = []
- if "*" not in prefix and "?" not in prefix:
- # We are on unix, otherwise no bash.
- if not prefix or prefix[-1] == os.path.sep:
- globbed.extend(glob(prefix + ".*"))
- prefix += "*"
- globbed.extend(glob(prefix))
- for x in sorted(globbed):
- if os.path.isdir(x):
- x += "/"
- # Append stripping the prefix (like bash, not like compgen).
- completion.append(x[prefix_dir:])
- return completion
-
-
-if os.environ.get("_ARGCOMPLETE"):
- try:
- import argcomplete.completers
- except ImportError:
- sys.exit(-1)
- filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter()
-
- def try_argcomplete(parser: argparse.ArgumentParser) -> None:
- argcomplete.autocomplete(parser, always_complete_options=False)
-
-else:
-
- def try_argcomplete(parser: argparse.ArgumentParser) -> None:
- pass
-
- filescompleter = None
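The module docstring above describes the intended wiring: call try_argcomplete(parser) just before parse_args() and attach filescompleter to positional arguments. A minimal sketch of that usage, assuming the vendored import path _pytest._argcomplete; the program name and argument are illustrative only:

    import argparse
    from _pytest._argcomplete import try_argcomplete, filescompleter

    parser = argparse.ArgumentParser(prog="myprog")
    # Attach the fast file completer so "dirn<TAB>" completes to "dirname/"
    # rather than "dirname "; filescompleter is None when argcomplete is not
    # installed, in which case the attribute is simply never used.
    parser.add_argument("file_or_dir", nargs="*").completer = filescompleter
    try_argcomplete(parser)    # no-op unless the _ARGCOMPLETE env var is set
    args = parser.parse_args()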
diff --git a/contrib/python/pytest/py3/_pytest/_code/__init__.py b/contrib/python/pytest/py3/_pytest/_code/__init__.py
deleted file mode 100644
index 511d0dde66..0000000000
--- a/contrib/python/pytest/py3/_pytest/_code/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Python inspection/code generation API."""
-from .code import Code
-from .code import ExceptionInfo
-from .code import filter_traceback
-from .code import Frame
-from .code import getfslineno
-from .code import Traceback
-from .code import TracebackEntry
-from .source import getrawcode
-from .source import Source
-
-__all__ = [
- "Code",
- "ExceptionInfo",
- "filter_traceback",
- "Frame",
- "getfslineno",
- "getrawcode",
- "Traceback",
- "TracebackEntry",
- "Source",
-]
diff --git a/contrib/python/pytest/py3/_pytest/_code/code.py b/contrib/python/pytest/py3/_pytest/_code/code.py
deleted file mode 100644
index 5b758a8848..0000000000
--- a/contrib/python/pytest/py3/_pytest/_code/code.py
+++ /dev/null
@@ -1,1274 +0,0 @@
-import ast
-import inspect
-import os
-import re
-import sys
-import traceback
-from inspect import CO_VARARGS
-from inspect import CO_VARKEYWORDS
-from io import StringIO
-from pathlib import Path
-from traceback import format_exception_only
-from types import CodeType
-from types import FrameType
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import ClassVar
-from typing import Dict
-from typing import Generic
-from typing import Iterable
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import overload
-from typing import Pattern
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-from weakref import ref
-
-import attr
-import pluggy
-
-import _pytest
-from _pytest._code.source import findsource
-from _pytest._code.source import getrawcode
-from _pytest._code.source import getstatementrange_ast
-from _pytest._code.source import Source
-from _pytest._io import TerminalWriter
-from _pytest._io.saferepr import safeformat
-from _pytest._io.saferepr import saferepr
-from _pytest.compat import final
-from _pytest.compat import get_real_func
-from _pytest.deprecated import check_ispytest
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import bestrelpath
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
- from typing_extensions import SupportsIndex
- from weakref import ReferenceType
-
- _TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"]
-
-
-class Code:
- """Wrapper around Python code objects."""
-
- __slots__ = ("raw",)
-
- def __init__(self, obj: CodeType) -> None:
- self.raw = obj
-
- @classmethod
- def from_function(cls, obj: object) -> "Code":
- return cls(getrawcode(obj))
-
- def __eq__(self, other):
- return self.raw == other.raw
-
- # Ignore type because of https://github.com/python/mypy/issues/4266.
- __hash__ = None # type: ignore
-
- @property
- def firstlineno(self) -> int:
- return self.raw.co_firstlineno - 1
-
- @property
- def name(self) -> str:
- return self.raw.co_name
-
- @property
- def path(self) -> Union[Path, str]:
- """Return a path object pointing to source code, or an ``str`` in
- case of ``OSError`` / non-existing file."""
- if not self.raw.co_filename:
- return ""
- try:
- p = absolutepath(self.raw.co_filename)
- # maybe don't try this checking
- if not p.exists():
- raise OSError("path check failed.")
- return p
- except OSError:
- # XXX maybe try harder like the weird logic
- # in the standard lib [linecache.updatecache] does?
- return self.raw.co_filename
-
- @property
- def fullsource(self) -> Optional["Source"]:
- """Return a _pytest._code.Source object for the full source file of the code."""
- full, _ = findsource(self.raw)
- return full
-
- def source(self) -> "Source":
- """Return a _pytest._code.Source object for the code object's source only."""
- # return source only for that part of code
- return Source(self.raw)
-
- def getargs(self, var: bool = False) -> Tuple[str, ...]:
- """Return a tuple with the argument names for the code object.
-
- If 'var' is set True also return the names of the variable and
- keyword arguments when present.
- """
- # Handy shortcut for getting args.
- raw = self.raw
- argcount = raw.co_argcount
- if var:
- argcount += raw.co_flags & CO_VARARGS
- argcount += raw.co_flags & CO_VARKEYWORDS
- return raw.co_varnames[:argcount]
-
-
-class Frame:
- """Wrapper around a Python frame holding f_locals and f_globals
- in which expressions can be evaluated."""
-
- __slots__ = ("raw",)
-
- def __init__(self, frame: FrameType) -> None:
- self.raw = frame
-
- @property
- def lineno(self) -> int:
- return self.raw.f_lineno - 1
-
- @property
- def f_globals(self) -> Dict[str, Any]:
- return self.raw.f_globals
-
- @property
- def f_locals(self) -> Dict[str, Any]:
- return self.raw.f_locals
-
- @property
- def code(self) -> Code:
- return Code(self.raw.f_code)
-
- @property
- def statement(self) -> "Source":
- """Statement this frame is at."""
- if self.code.fullsource is None:
- return Source("")
- return self.code.fullsource.getstatement(self.lineno)
-
- def eval(self, code, **vars):
- """Evaluate 'code' in the frame.
-
- 'vars' are optional additional local variables.
-
- Returns the result of the evaluation.
- """
- f_locals = self.f_locals.copy()
- f_locals.update(vars)
- return eval(code, self.f_globals, f_locals)
-
- def repr(self, object: object) -> str:
- """Return a 'safe' (non-recursive, one-line) string repr for 'object'."""
- return saferepr(object)
-
- def getargs(self, var: bool = False):
- """Return a list of tuples (name, value) for all arguments.
-
- If 'var' is set True, also include the variable and keyword arguments
- when present.
- """
- retval = []
- for arg in self.code.getargs(var):
- try:
- retval.append((arg, self.f_locals[arg]))
- except KeyError:
- pass # this can occur when using Psyco
- return retval
-
-
-class TracebackEntry:
- """A single entry in a Traceback."""
-
- __slots__ = ("_rawentry", "_excinfo", "_repr_style")
-
- def __init__(
- self,
- rawentry: TracebackType,
- excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
- ) -> None:
- self._rawentry = rawentry
- self._excinfo = excinfo
- self._repr_style: Optional['Literal["short", "long"]'] = None
-
- @property
- def lineno(self) -> int:
- return self._rawentry.tb_lineno - 1
-
- def set_repr_style(self, mode: "Literal['short', 'long']") -> None:
- assert mode in ("short", "long")
- self._repr_style = mode
-
- @property
- def frame(self) -> Frame:
- return Frame(self._rawentry.tb_frame)
-
- @property
- def relline(self) -> int:
- return self.lineno - self.frame.code.firstlineno
-
- def __repr__(self) -> str:
- return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
-
- @property
- def statement(self) -> "Source":
- """_pytest._code.Source object for the current statement."""
- source = self.frame.code.fullsource
- assert source is not None
- return source.getstatement(self.lineno)
-
- @property
- def path(self) -> Union[Path, str]:
- """Path to the source code."""
- return self.frame.code.path
-
- @property
- def locals(self) -> Dict[str, Any]:
- """Locals of underlying frame."""
- return self.frame.f_locals
-
- def getfirstlinesource(self) -> int:
- return self.frame.code.firstlineno
-
- def getsource(
- self, astcache: Optional[Dict[Union[str, Path], ast.AST]] = None
- ) -> Optional["Source"]:
- """Return failing source code."""
- # we use the passed in astcache to not reparse asttrees
- # within exception info printing
- source = self.frame.code.fullsource
- if source is None:
- return None
- key = astnode = None
- if astcache is not None:
- key = self.frame.code.path
- if key is not None:
- astnode = astcache.get(key, None)
- start = self.getfirstlinesource()
- try:
- astnode, _, end = getstatementrange_ast(
- self.lineno, source, astnode=astnode
- )
- except SyntaxError:
- end = self.lineno + 1
- else:
- if key is not None and astcache is not None:
- astcache[key] = astnode
- return source[start:end]
-
- source = property(getsource)
-
- def ishidden(self) -> bool:
- """Return True if the current frame has a var __tracebackhide__
- resolving to True.
-
- If __tracebackhide__ is a callable, it gets called with the
- ExceptionInfo instance and can decide whether to hide the traceback.
-
- Mostly for internal use.
- """
- tbh: Union[
- bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]
- ] = False
- for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals):
- # in normal cases, f_locals and f_globals are dictionaries
- # however via `exec(...)` / `eval(...)` they can be other types
- # (even incorrect types!).
- # as such, we suppress all exceptions while accessing __tracebackhide__
- try:
- tbh = maybe_ns_dct["__tracebackhide__"]
- except Exception:
- pass
- else:
- break
- if tbh and callable(tbh):
- return tbh(None if self._excinfo is None else self._excinfo())
- return tbh
-
- def __str__(self) -> str:
- name = self.frame.code.name
- try:
- line = str(self.statement).lstrip()
- except KeyboardInterrupt:
- raise
- except BaseException:
- line = "???"
- # This output does not quite match Python's repr for traceback entries,
- # but changing it to do so would break certain plugins. See
- # https://github.com/pytest-dev/pytest/pull/7535/ for details.
- return " File %r:%d in %s\n %s\n" % (
- str(self.path),
- self.lineno + 1,
- name,
- line,
- )
-
- @property
- def name(self) -> str:
- """co_name of underlying code."""
- return self.frame.code.raw.co_name
-
-
-class Traceback(List[TracebackEntry]):
- """Traceback objects encapsulate and offer higher level access to Traceback entries."""
-
- def __init__(
- self,
- tb: Union[TracebackType, Iterable[TracebackEntry]],
- excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
- ) -> None:
- """Initialize from given python traceback object and ExceptionInfo."""
- self._excinfo = excinfo
- if isinstance(tb, TracebackType):
-
- def f(cur: TracebackType) -> Iterable[TracebackEntry]:
- cur_: Optional[TracebackType] = cur
- while cur_ is not None:
- yield TracebackEntry(cur_, excinfo=excinfo)
- cur_ = cur_.tb_next
-
- super().__init__(f(tb))
- else:
- super().__init__(tb)
-
- def cut(
- self,
- path: Optional[Union["os.PathLike[str]", str]] = None,
- lineno: Optional[int] = None,
- firstlineno: Optional[int] = None,
- excludepath: Optional["os.PathLike[str]"] = None,
- ) -> "Traceback":
- """Return a Traceback instance wrapping part of this Traceback.
-
- By providing any combination of path, lineno and firstlineno, the
- first frame to start the to-be-returned traceback is determined.
-
- This allows cutting the first part of a Traceback instance e.g.
- for formatting reasons (removing some uninteresting bits that deal
- with handling of the exception/traceback).
- """
- path_ = None if path is None else os.fspath(path)
- excludepath_ = None if excludepath is None else os.fspath(excludepath)
- for x in self:
- code = x.frame.code
- codepath = code.path
- if path is not None and str(codepath) != path_:
- continue
- if (
- excludepath is not None
- and isinstance(codepath, Path)
- and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator]
- ):
- continue
- if lineno is not None and x.lineno != lineno:
- continue
- if firstlineno is not None and x.frame.code.firstlineno != firstlineno:
- continue
- return Traceback(x._rawentry, self._excinfo)
- return self
-
- @overload
- def __getitem__(self, key: "SupportsIndex") -> TracebackEntry:
- ...
-
- @overload
- def __getitem__(self, key: slice) -> "Traceback":
- ...
-
- def __getitem__(
- self, key: Union["SupportsIndex", slice]
- ) -> Union[TracebackEntry, "Traceback"]:
- if isinstance(key, slice):
- return self.__class__(super().__getitem__(key))
- else:
- return super().__getitem__(key)
-
- def filter(
- self, fn: Callable[[TracebackEntry], bool] = lambda x: not x.ishidden()
- ) -> "Traceback":
- """Return a Traceback instance with certain items removed
-
- fn is a function that gets a single argument, a TracebackEntry
- instance, and should return True when the item should be added
- to the Traceback, False when not.
-
- By default this removes all the TracebackEntries which are hidden
- (see ishidden() above).
- """
- return Traceback(filter(fn, self), self._excinfo)
-
- def getcrashentry(self) -> TracebackEntry:
- """Return last non-hidden traceback entry that lead to the exception of a traceback."""
- for i in range(-1, -len(self) - 1, -1):
- entry = self[i]
- if not entry.ishidden():
- return entry
- return self[-1]
-
- def recursionindex(self) -> Optional[int]:
- """Return the index of the frame/TracebackEntry where recursion originates if
- appropriate, None if no recursion occurred."""
- cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {}
- for i, entry in enumerate(self):
- # id for the code.raw is needed to work around
- # the strange metaprogramming in the decorator lib from pypi
- # which generates code objects that have hash/value equality
- # XXX needs a test
- key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
- # print "checking for recursion at", key
- values = cache.setdefault(key, [])
- if values:
- f = entry.frame
- loc = f.f_locals
- for otherloc in values:
- if otherloc == loc:
- return i
- values.append(entry.frame.f_locals)
- return None
-
-
-E = TypeVar("E", bound=BaseException, covariant=True)
-
-
-@final
-@attr.s(repr=False, init=False, auto_attribs=True)
-class ExceptionInfo(Generic[E]):
- """Wraps sys.exc_info() objects and offers help for navigating the traceback."""
-
- _assert_start_repr: ClassVar = "AssertionError('assert "
-
- _excinfo: Optional[Tuple[Type["E"], "E", TracebackType]]
- _striptext: str
- _traceback: Optional[Traceback]
-
- def __init__(
- self,
- excinfo: Optional[Tuple[Type["E"], "E", TracebackType]],
- striptext: str = "",
- traceback: Optional[Traceback] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self._excinfo = excinfo
- self._striptext = striptext
- self._traceback = traceback
-
- @classmethod
- def from_exc_info(
- cls,
- exc_info: Tuple[Type[E], E, TracebackType],
- exprinfo: Optional[str] = None,
- ) -> "ExceptionInfo[E]":
- """Return an ExceptionInfo for an existing exc_info tuple.
-
- .. warning::
-
- Experimental API
-
- :param exprinfo:
- A text string helping to determine if we should strip
- ``AssertionError`` from the output. Defaults to the exception
- message/``__str__()``.
- """
- _striptext = ""
- if exprinfo is None and isinstance(exc_info[1], AssertionError):
- exprinfo = getattr(exc_info[1], "msg", None)
- if exprinfo is None:
- exprinfo = saferepr(exc_info[1])
- if exprinfo and exprinfo.startswith(cls._assert_start_repr):
- _striptext = "AssertionError: "
-
- return cls(exc_info, _striptext, _ispytest=True)
-
- @classmethod
- def from_current(
- cls, exprinfo: Optional[str] = None
- ) -> "ExceptionInfo[BaseException]":
- """Return an ExceptionInfo matching the current traceback.
-
- .. warning::
-
- Experimental API
-
- :param exprinfo:
- A text string helping to determine if we should strip
- ``AssertionError`` from the output. Defaults to the exception
- message/``__str__()``.
- """
- tup = sys.exc_info()
- assert tup[0] is not None, "no current exception"
- assert tup[1] is not None, "no current exception"
- assert tup[2] is not None, "no current exception"
- exc_info = (tup[0], tup[1], tup[2])
- return ExceptionInfo.from_exc_info(exc_info, exprinfo)
-
- @classmethod
- def for_later(cls) -> "ExceptionInfo[E]":
- """Return an unfilled ExceptionInfo."""
- return cls(None, _ispytest=True)
-
- def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None:
- """Fill an unfilled ExceptionInfo created with ``for_later()``."""
- assert self._excinfo is None, "ExceptionInfo was already filled"
- self._excinfo = exc_info
-
- @property
- def type(self) -> Type[E]:
- """The exception class."""
- assert (
- self._excinfo is not None
- ), ".type can only be used after the context manager exits"
- return self._excinfo[0]
-
- @property
- def value(self) -> E:
- """The exception value."""
- assert (
- self._excinfo is not None
- ), ".value can only be used after the context manager exits"
- return self._excinfo[1]
-
- @property
- def tb(self) -> TracebackType:
- """The exception raw traceback."""
- assert (
- self._excinfo is not None
- ), ".tb can only be used after the context manager exits"
- return self._excinfo[2]
-
- @property
- def typename(self) -> str:
- """The type name of the exception."""
- assert (
- self._excinfo is not None
- ), ".typename can only be used after the context manager exits"
- return self.type.__name__
-
- @property
- def traceback(self) -> Traceback:
- """The traceback."""
- if self._traceback is None:
- self._traceback = Traceback(self.tb, excinfo=ref(self))
- return self._traceback
-
- @traceback.setter
- def traceback(self, value: Traceback) -> None:
- self._traceback = value
-
- def __repr__(self) -> str:
- if self._excinfo is None:
- return "<ExceptionInfo for raises contextmanager>"
- return "<{} {} tblen={}>".format(
- self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback)
- )
-
- def exconly(self, tryshort: bool = False) -> str:
- """Return the exception as a string.
-
- When 'tryshort' resolves to True, and the exception is an
- AssertionError, only the actual exception part of the exception
- representation is returned (so 'AssertionError: ' is removed from
- the beginning).
- """
- lines = format_exception_only(self.type, self.value)
- text = "".join(lines)
- text = text.rstrip()
- if tryshort:
- if text.startswith(self._striptext):
- text = text[len(self._striptext) :]
- return text
-
- def errisinstance(
- self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]]
- ) -> bool:
- """Return True if the exception is an instance of exc.
-
- Consider using ``isinstance(excinfo.value, exc)`` instead.
- """
- return isinstance(self.value, exc)
-
- def _getreprcrash(self) -> "ReprFileLocation":
- exconly = self.exconly(tryshort=True)
- entry = self.traceback.getcrashentry()
- path, lineno = entry.frame.code.raw.co_filename, entry.lineno
- return ReprFileLocation(path, lineno + 1, exconly)
-
- def getrepr(
- self,
- showlocals: bool = False,
- style: "_TracebackStyle" = "long",
- abspath: bool = False,
- tbfilter: bool = True,
- funcargs: bool = False,
- truncate_locals: bool = True,
- chain: bool = True,
- ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]:
- """Return str()able representation of this exception info.
-
- :param bool showlocals:
- Show locals per traceback entry.
- Ignored if ``style=="native"``.
-
- :param str style:
- long|short|no|native|value traceback style.
-
- :param bool abspath:
- If paths should be changed to absolute or left unchanged.
-
- :param bool tbfilter:
- Hide entries that contain a local variable ``__tracebackhide__==True``.
- Ignored if ``style=="native"``.
-
- :param bool funcargs:
- Show fixtures ("funcargs" for legacy purposes) per traceback entry.
-
- :param bool truncate_locals:
- With ``showlocals==True``, make sure locals can be safely represented as strings.
-
- :param bool chain:
- If chained exceptions in Python 3 should be shown.
-
- .. versionchanged:: 3.9
-
- Added the ``chain`` parameter.
- """
- if style == "native":
- return ReprExceptionInfo(
- ReprTracebackNative(
- traceback.format_exception(
- self.type, self.value, self.traceback[0]._rawentry
- )
- ),
- self._getreprcrash(),
- )
-
- fmt = FormattedExcinfo(
- showlocals=showlocals,
- style=style,
- abspath=abspath,
- tbfilter=tbfilter,
- funcargs=funcargs,
- truncate_locals=truncate_locals,
- chain=chain,
- )
- return fmt.repr_excinfo(self)
-
- def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]":
- """Check whether the regular expression `regexp` matches the string
- representation of the exception using :func:`python:re.search`.
-
- If it matches `True` is returned, otherwise an `AssertionError` is raised.
- """
- __tracebackhide__ = True
- msg = "Regex pattern {!r} does not match {!r}."
- if regexp == str(self.value):
- msg += " Did you mean to `re.escape()` the regex?"
- assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value))
- # Return True to allow for "assert excinfo.match()".
- return True
-
-
-@attr.s(auto_attribs=True)
-class FormattedExcinfo:
- """Presenting information about failing Functions and Generators."""
-
- # for traceback entries
- flow_marker: ClassVar = ">"
- fail_marker: ClassVar = "E"
-
- showlocals: bool = False
- style: "_TracebackStyle" = "long"
- abspath: bool = True
- tbfilter: bool = True
- funcargs: bool = False
- truncate_locals: bool = True
- chain: bool = True
- astcache: Dict[Union[str, Path], ast.AST] = attr.ib(
- factory=dict, init=False, repr=False
- )
-
- def _getindent(self, source: "Source") -> int:
- # Figure out indent for the given source.
- try:
- s = str(source.getstatement(len(source) - 1))
- except KeyboardInterrupt:
- raise
- except BaseException:
- try:
- s = str(source[-1])
- except KeyboardInterrupt:
- raise
- except BaseException:
- return 0
- return 4 + (len(s) - len(s.lstrip()))
-
- def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]:
- source = entry.getsource(self.astcache)
- if source is not None:
- source = source.deindent()
- return source
-
- def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]:
- if self.funcargs:
- args = []
- for argname, argvalue in entry.frame.getargs(var=True):
- args.append((argname, saferepr(argvalue)))
- return ReprFuncArgs(args)
- return None
-
- def get_source(
- self,
- source: Optional["Source"],
- line_index: int = -1,
- excinfo: Optional[ExceptionInfo[BaseException]] = None,
- short: bool = False,
- ) -> List[str]:
- """Return formatted and marked up source lines."""
- lines = []
- if source is None or line_index >= len(source.lines):
- source = Source("???")
- line_index = 0
- if line_index < 0:
- line_index += len(source)
- space_prefix = " "
- if short:
- lines.append(space_prefix + source.lines[line_index].strip())
- else:
- for line in source.lines[:line_index]:
- lines.append(space_prefix + line)
- lines.append(self.flow_marker + " " + source.lines[line_index])
- for line in source.lines[line_index + 1 :]:
- lines.append(space_prefix + line)
- if excinfo is not None:
- indent = 4 if short else self._getindent(source)
- lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
- return lines
-
- def get_exconly(
- self,
- excinfo: ExceptionInfo[BaseException],
- indent: int = 4,
- markall: bool = False,
- ) -> List[str]:
- lines = []
- indentstr = " " * indent
- # Get the real exception information out.
- exlines = excinfo.exconly(tryshort=True).split("\n")
- failindent = self.fail_marker + indentstr[1:]
- for line in exlines:
- lines.append(failindent + line)
- if not markall:
- failindent = indentstr
- return lines
-
- def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]:
- if self.showlocals:
- lines = []
- keys = [loc for loc in locals if loc[0] != "@"]
- keys.sort()
- for name in keys:
- value = locals[name]
- if name == "__builtins__":
- lines.append("__builtins__ = <builtins>")
- else:
- # This formatting could all be handled by the
- # _repr() function, which is only reprlib.Repr in
- # disguise, so is very configurable.
- if self.truncate_locals:
- str_repr = saferepr(value)
- else:
- str_repr = safeformat(value)
- # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
- lines.append(f"{name:<10} = {str_repr}")
- # else:
- # self._line("%-10s =\\" % (name,))
- # # XXX
- # pprint.pprint(value, stream=self.excinfowriter)
- return ReprLocals(lines)
- return None
-
- def repr_traceback_entry(
- self,
- entry: TracebackEntry,
- excinfo: Optional[ExceptionInfo[BaseException]] = None,
- ) -> "ReprEntry":
- lines: List[str] = []
- style = entry._repr_style if entry._repr_style is not None else self.style
- if style in ("short", "long"):
- source = self._getentrysource(entry)
- if source is None:
- source = Source("???")
- line_index = 0
- else:
- line_index = entry.lineno - entry.getfirstlinesource()
- short = style == "short"
- reprargs = self.repr_args(entry) if not short else None
- s = self.get_source(source, line_index, excinfo, short=short)
- lines.extend(s)
- if short:
- message = "in %s" % (entry.name)
- else:
- message = excinfo and excinfo.typename or ""
- entry_path = entry.path
- path = self._makepath(entry_path)
- reprfileloc = ReprFileLocation(path, entry.lineno + 1, message)
- localsrepr = self.repr_locals(entry.locals)
- return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style)
- elif style == "value":
- if excinfo:
- lines.extend(str(excinfo.value).split("\n"))
- return ReprEntry(lines, None, None, None, style)
- else:
- if excinfo:
- lines.extend(self.get_exconly(excinfo, indent=4))
- return ReprEntry(lines, None, None, None, style)
-
- def _makepath(self, path: Union[Path, str]) -> str:
- if not self.abspath and isinstance(path, Path):
- try:
- np = bestrelpath(Path.cwd(), path)
- except OSError:
- return str(path)
- if len(np) < len(str(path)):
- return np
- return str(path)
-
- def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback":
- traceback = excinfo.traceback
- if self.tbfilter:
- traceback = traceback.filter()
-
- if isinstance(excinfo.value, RecursionError):
- traceback, extraline = self._truncate_recursive_traceback(traceback)
- else:
- extraline = None
-
- last = traceback[-1]
- entries = []
- if self.style == "value":
- reprentry = self.repr_traceback_entry(last, excinfo)
- entries.append(reprentry)
- return ReprTraceback(entries, None, style=self.style)
-
- for index, entry in enumerate(traceback):
- einfo = (last == entry) and excinfo or None
- reprentry = self.repr_traceback_entry(entry, einfo)
- entries.append(reprentry)
- return ReprTraceback(entries, extraline, style=self.style)
-
- def _truncate_recursive_traceback(
- self, traceback: Traceback
- ) -> Tuple[Traceback, Optional[str]]:
- """Truncate the given recursive traceback trying to find the starting
- point of the recursion.
-
- The detection is done by going through each traceback entry and
- finding the point in which the locals of the frame are equal to the
- locals of a previous frame (see ``recursionindex()``).
-
- Handle the situation where the recursion process might raise an
- exception (for example comparing numpy arrays using equality raises a
- TypeError), in which case we do our best to warn the user of the
- error and show a limited traceback.
- """
- try:
- recursionindex = traceback.recursionindex()
- except Exception as e:
- max_frames = 10
- extraline: Optional[str] = (
- "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
- " The following exception happened when comparing locals in the stack frame:\n"
- " {exc_type}: {exc_msg}\n"
- " Displaying first and last {max_frames} stack frames out of {total}."
- ).format(
- exc_type=type(e).__name__,
- exc_msg=str(e),
- max_frames=max_frames,
- total=len(traceback),
- )
- # Type ignored because adding two instances of a List subtype
- # currently incorrectly has type List instead of the subtype.
- traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore
- else:
- if recursionindex is not None:
- extraline = "!!! Recursion detected (same locals & position)"
- traceback = traceback[: recursionindex + 1]
- else:
- extraline = None
-
- return traceback, extraline
-
- def repr_excinfo(
- self, excinfo: ExceptionInfo[BaseException]
- ) -> "ExceptionChainRepr":
- repr_chain: List[
- Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]
- ] = []
- e: Optional[BaseException] = excinfo.value
- excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo
- descr = None
- seen: Set[int] = set()
- while e is not None and id(e) not in seen:
- seen.add(id(e))
- if excinfo_:
- reprtraceback = self.repr_traceback(excinfo_)
- reprcrash: Optional[ReprFileLocation] = (
- excinfo_._getreprcrash() if self.style != "value" else None
- )
- else:
- # Fallback to native repr if the exception doesn't have a traceback:
- # ExceptionInfo objects require a full traceback to work.
- reprtraceback = ReprTracebackNative(
- traceback.format_exception(type(e), e, None)
- )
- reprcrash = None
-
- repr_chain += [(reprtraceback, reprcrash, descr)]
- if e.__cause__ is not None and self.chain:
- e = e.__cause__
- excinfo_ = (
- ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
- if e.__traceback__
- else None
- )
- descr = "The above exception was the direct cause of the following exception:"
- elif (
- e.__context__ is not None and not e.__suppress_context__ and self.chain
- ):
- e = e.__context__
- excinfo_ = (
- ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
- if e.__traceback__
- else None
- )
- descr = "During handling of the above exception, another exception occurred:"
- else:
- e = None
- repr_chain.reverse()
- return ExceptionChainRepr(repr_chain)
-
-
-@attr.s(eq=False, auto_attribs=True)
-class TerminalRepr:
- def __str__(self) -> str:
- # FYI this is called from pytest-xdist's serialization of exception
- # information.
- io = StringIO()
- tw = TerminalWriter(file=io)
- self.toterminal(tw)
- return io.getvalue().strip()
-
- def __repr__(self) -> str:
- return f"<{self.__class__} instance at {id(self):0x}>"
-
- def toterminal(self, tw: TerminalWriter) -> None:
- raise NotImplementedError()
-
-
-# This class is abstract -- only subclasses are instantiated.
-@attr.s(eq=False)
-class ExceptionRepr(TerminalRepr):
- # Provided by subclasses.
- reprcrash: Optional["ReprFileLocation"]
- reprtraceback: "ReprTraceback"
-
- def __attrs_post_init__(self) -> None:
- self.sections: List[Tuple[str, str, str]] = []
-
- def addsection(self, name: str, content: str, sep: str = "-") -> None:
- self.sections.append((name, content, sep))
-
- def toterminal(self, tw: TerminalWriter) -> None:
- for name, content, sep in self.sections:
- tw.sep(sep, name)
- tw.line(content)
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ExceptionChainRepr(ExceptionRepr):
- chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]]
-
- def __attrs_post_init__(self) -> None:
- super().__attrs_post_init__()
- # reprcrash and reprtraceback of the outermost (the newest) exception
- # in the chain.
- self.reprtraceback = self.chain[-1][0]
- self.reprcrash = self.chain[-1][1]
-
- def toterminal(self, tw: TerminalWriter) -> None:
- for element in self.chain:
- element[0].toterminal(tw)
- if element[2] is not None:
- tw.line("")
- tw.line(element[2], yellow=True)
- super().toterminal(tw)
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprExceptionInfo(ExceptionRepr):
- reprtraceback: "ReprTraceback"
- reprcrash: "ReprFileLocation"
-
- def toterminal(self, tw: TerminalWriter) -> None:
- self.reprtraceback.toterminal(tw)
- super().toterminal(tw)
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprTraceback(TerminalRepr):
- reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]]
- extraline: Optional[str]
- style: "_TracebackStyle"
-
- entrysep: ClassVar = "_ "
-
- def toterminal(self, tw: TerminalWriter) -> None:
- # The entries might have different styles.
- for i, entry in enumerate(self.reprentries):
- if entry.style == "long":
- tw.line("")
- entry.toterminal(tw)
- if i < len(self.reprentries) - 1:
- next_entry = self.reprentries[i + 1]
- if (
- entry.style == "long"
- or entry.style == "short"
- and next_entry.style == "long"
- ):
- tw.sep(self.entrysep)
-
- if self.extraline:
- tw.line(self.extraline)
-
-
-class ReprTracebackNative(ReprTraceback):
- def __init__(self, tblines: Sequence[str]) -> None:
- self.style = "native"
- self.reprentries = [ReprEntryNative(tblines)]
- self.extraline = None
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprEntryNative(TerminalRepr):
- lines: Sequence[str]
-
- style: ClassVar["_TracebackStyle"] = "native"
-
- def toterminal(self, tw: TerminalWriter) -> None:
- tw.write("".join(self.lines))
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprEntry(TerminalRepr):
- lines: Sequence[str]
- reprfuncargs: Optional["ReprFuncArgs"]
- reprlocals: Optional["ReprLocals"]
- reprfileloc: Optional["ReprFileLocation"]
- style: "_TracebackStyle"
-
- def _write_entry_lines(self, tw: TerminalWriter) -> None:
- """Write the source code portions of a list of traceback entries with syntax highlighting.
-
- Usually entries are lines like these:
-
- " x = 1"
- "> assert x == 2"
- "E assert 1 == 2"
-
- This function takes care of rendering the "source" portions of it (the lines without
- the "E" prefix) using syntax highlighting, taking care to not highlighting the ">"
- character, as doing so might break line continuations.
- """
-
- if not self.lines:
- return
-
- # separate indents and source lines that are not failures: we want to
- # highlight the code but not the indentation, which may contain markers
- # such as "> assert 0"
- fail_marker = f"{FormattedExcinfo.fail_marker} "
- indent_size = len(fail_marker)
- indents: List[str] = []
- source_lines: List[str] = []
- failure_lines: List[str] = []
- for index, line in enumerate(self.lines):
- is_failure_line = line.startswith(fail_marker)
- if is_failure_line:
- # from this point on all lines are considered part of the failure
- failure_lines.extend(self.lines[index:])
- break
- else:
- if self.style == "value":
- source_lines.append(line)
- else:
- indents.append(line[:indent_size])
- source_lines.append(line[indent_size:])
-
- tw._write_source(source_lines, indents)
-
- # failure lines are always completely red and bold
- for line in failure_lines:
- tw.line(line, bold=True, red=True)
-
- def toterminal(self, tw: TerminalWriter) -> None:
- if self.style == "short":
- assert self.reprfileloc is not None
- self.reprfileloc.toterminal(tw)
- self._write_entry_lines(tw)
- if self.reprlocals:
- self.reprlocals.toterminal(tw, indent=" " * 8)
- return
-
- if self.reprfuncargs:
- self.reprfuncargs.toterminal(tw)
-
- self._write_entry_lines(tw)
-
- if self.reprlocals:
- tw.line("")
- self.reprlocals.toterminal(tw)
- if self.reprfileloc:
- if self.lines:
- tw.line("")
- self.reprfileloc.toterminal(tw)
-
- def __str__(self) -> str:
- return "{}\n{}\n{}".format(
- "\n".join(self.lines), self.reprlocals, self.reprfileloc
- )
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprFileLocation(TerminalRepr):
- path: str = attr.ib(converter=str)
- lineno: int
- message: str
-
- def toterminal(self, tw: TerminalWriter) -> None:
- # Filename and lineno output for each entry, using an output format
- # that most editors understand.
- msg = self.message
- i = msg.find("\n")
- if i != -1:
- msg = msg[:i]
- tw.write(self.path, bold=True, red=True)
- tw.line(f":{self.lineno}: {msg}")
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprLocals(TerminalRepr):
- lines: Sequence[str]
-
- def toterminal(self, tw: TerminalWriter, indent="") -> None:
- for line in self.lines:
- tw.line(indent + line)
-
-
-@attr.s(eq=False, auto_attribs=True)
-class ReprFuncArgs(TerminalRepr):
- args: Sequence[Tuple[str, object]]
-
- def toterminal(self, tw: TerminalWriter) -> None:
- if self.args:
- linesofar = ""
- for name, value in self.args:
- ns = f"{name} = {value}"
- if len(ns) + len(linesofar) + 2 > tw.fullwidth:
- if linesofar:
- tw.line(linesofar)
- linesofar = ns
- else:
- if linesofar:
- linesofar += ", " + ns
- else:
- linesofar = ns
- if linesofar:
- tw.line(linesofar)
- tw.line("")
-
-
-def getfslineno(obj: object) -> Tuple[Union[str, Path], int]:
- """Return source location (path, lineno) for the given object.
-
- If the source cannot be determined return ("", -1).
-
- The line number is 0-based.
- """
- # xxx let decorators etc specify a sane ordering
- # NOTE: this used to be done in _pytest.compat.getfslineno, initially added
- # in 6ec13a2b9. It ("place_as") appears to be something very custom.
- obj = get_real_func(obj)
- if hasattr(obj, "place_as"):
- obj = obj.place_as # type: ignore[attr-defined]
-
- try:
- code = Code.from_function(obj)
- except TypeError:
- try:
- fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type]
- except TypeError:
- return "", -1
-
- fspath = fn and absolutepath(fn) or ""
- lineno = -1
- if fspath:
- try:
- _, lineno = findsource(obj)
- except OSError:
- pass
- return fspath, lineno
-
- return code.path, code.firstlineno
-
-
-# Relative paths that we use to filter traceback entries from appearing to the user;
-# see filter_traceback.
-# note: if we need to add more paths than what we have now we should probably use a list
-# for better maintenance.
-
-_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc"))
-# pluggy is either a package or a single module depending on the version
-if _PLUGGY_DIR.name == "__init__.py":
- _PLUGGY_DIR = _PLUGGY_DIR.parent
-_PYTEST_DIR = Path(_pytest.__file__).parent
-
-
-def filter_traceback(entry: TracebackEntry) -> bool:
- """Return True if a TracebackEntry instance should be included in tracebacks.
-
- We hide traceback entries of:
-
- * dynamically generated code (no code to show up for it);
- * internal traceback from pytest or its internal libraries, py and pluggy.
- """
- # entry.path might sometimes return a str object when the entry
- # points to dynamically generated code.
- # See https://bitbucket.org/pytest-dev/py/issues/71.
- raw_filename = entry.frame.code.raw.co_filename
- is_generated = "<" in raw_filename and ">" in raw_filename
- if is_generated:
- return False
-
- # entry.path might point to a non-existing file, in which case it will
- # also return a str object. See #1133.
- p = Path(entry.path)
-
- parents = p.parents
- if _PLUGGY_DIR in parents:
- return False
- if _PYTEST_DIR in parents:
- return False
-
- return True
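The ExceptionInfo/Traceback machinery deleted above is easiest to see end to end in a small sketch; this assumes standalone use of the internal _pytest._code API, with ZeroDivisionError purely as an illustration:

    from _pytest._code import ExceptionInfo

    try:
        1 / 0
    except ZeroDivisionError:
        excinfo = ExceptionInfo.from_current()
        print(excinfo.typename)       # "ZeroDivisionError"
        print(excinfo.exconly())      # "ZeroDivisionError: division by zero"
        # Drop frames marked with __tracebackhide__, then render a short-style report.
        excinfo.traceback = excinfo.traceback.filter()
        print(str(excinfo.getrepr(style="short")))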
diff --git a/contrib/python/pytest/py3/_pytest/_code/source.py b/contrib/python/pytest/py3/_pytest/_code/source.py
deleted file mode 100644
index 208cfb8003..0000000000
--- a/contrib/python/pytest/py3/_pytest/_code/source.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import ast
-import inspect
-import textwrap
-import tokenize
-import types
-import warnings
-from bisect import bisect_right
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import overload
-from typing import Tuple
-from typing import Union
-
-
-class Source:
- """An immutable object holding a source code fragment.
-
- When using Source(...), the source lines are deindented.
- """
-
- def __init__(self, obj: object = None) -> None:
- if not obj:
- self.lines: List[str] = []
- elif isinstance(obj, Source):
- self.lines = obj.lines
- elif isinstance(obj, (tuple, list)):
- self.lines = deindent(x.rstrip("\n") for x in obj)
- elif isinstance(obj, str):
- self.lines = deindent(obj.split("\n"))
- else:
- try:
- rawcode = getrawcode(obj)
- src = inspect.getsource(rawcode)
- except TypeError:
- src = inspect.getsource(obj) # type: ignore[arg-type]
- self.lines = deindent(src.split("\n"))
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, Source):
- return NotImplemented
- return self.lines == other.lines
-
- # Ignore type because of https://github.com/python/mypy/issues/4266.
- __hash__ = None # type: ignore
-
- @overload
- def __getitem__(self, key: int) -> str:
- ...
-
- @overload
- def __getitem__(self, key: slice) -> "Source":
- ...
-
- def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]:
- if isinstance(key, int):
- return self.lines[key]
- else:
- if key.step not in (None, 1):
- raise IndexError("cannot slice a Source with a step")
- newsource = Source()
- newsource.lines = self.lines[key.start : key.stop]
- return newsource
-
- def __iter__(self) -> Iterator[str]:
- return iter(self.lines)
-
- def __len__(self) -> int:
- return len(self.lines)
-
- def strip(self) -> "Source":
- """Return new Source object with trailing and leading blank lines removed."""
- start, end = 0, len(self)
- while start < end and not self.lines[start].strip():
- start += 1
- while end > start and not self.lines[end - 1].strip():
- end -= 1
- source = Source()
- source.lines[:] = self.lines[start:end]
- return source
-
- def indent(self, indent: str = " " * 4) -> "Source":
- """Return a copy of the source object with all lines indented by the
- given indent-string."""
- newsource = Source()
- newsource.lines = [(indent + line) for line in self.lines]
- return newsource
-
- def getstatement(self, lineno: int) -> "Source":
- """Return Source statement which contains the given linenumber
- (counted from 0)."""
- start, end = self.getstatementrange(lineno)
- return self[start:end]
-
- def getstatementrange(self, lineno: int) -> Tuple[int, int]:
- """Return (start, end) tuple which spans the minimal statement region
- that contains the given lineno."""
- if not (0 <= lineno < len(self)):
- raise IndexError("lineno out of range")
- ast, start, end = getstatementrange_ast(lineno, self)
- return start, end
-
- def deindent(self) -> "Source":
- """Return a new Source object deindented."""
- newsource = Source()
- newsource.lines[:] = deindent(self.lines)
- return newsource
-
- def __str__(self) -> str:
- return "\n".join(self.lines)
-
-
-#
-# helper functions
-#
-
-
-def findsource(obj) -> Tuple[Optional[Source], int]:
- try:
- sourcelines, lineno = inspect.findsource(obj)
- except Exception:
- return None, -1
- source = Source()
- source.lines = [line.rstrip() for line in sourcelines]
- return source, lineno
-
-
-def getrawcode(obj: object, trycall: bool = True) -> types.CodeType:
- """Return code object for given function."""
- try:
- return obj.__code__ # type: ignore[attr-defined,no-any-return]
- except AttributeError:
- pass
- if trycall:
- call = getattr(obj, "__call__", None)
- if call and not isinstance(obj, type):
- return getrawcode(call, trycall=False)
- raise TypeError(f"could not get code object for {obj!r}")
-
-
-def deindent(lines: Iterable[str]) -> List[str]:
- return textwrap.dedent("\n".join(lines)).splitlines()
-
-
-def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]:
- # Flatten all statements and except handlers into one lineno-list.
- # AST's line numbers start indexing at 1.
- values: List[int] = []
- for x in ast.walk(node):
- if isinstance(x, (ast.stmt, ast.ExceptHandler)):
- # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator.
- # Since Python 3.8, the lineno points to the class/def, so need to include the decorators.
- if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
- for d in x.decorator_list:
- values.append(d.lineno - 1)
- values.append(x.lineno - 1)
- for name in ("finalbody", "orelse"):
- val: Optional[List[ast.stmt]] = getattr(x, name, None)
- if val:
- # Treat the finally/orelse part as its own statement.
- values.append(val[0].lineno - 1 - 1)
- values.sort()
- insert_index = bisect_right(values, lineno)
- start = values[insert_index - 1]
- if insert_index >= len(values):
- end = None
- else:
- end = values[insert_index]
- return start, end
-
-
-def getstatementrange_ast(
- lineno: int,
- source: Source,
- assertion: bool = False,
- astnode: Optional[ast.AST] = None,
-) -> Tuple[ast.AST, int, int]:
- if astnode is None:
- content = str(source)
- # See #4260:
- # Don't produce duplicate warnings when compiling source to find AST.
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- astnode = ast.parse(content, "source", "exec")
-
- start, end = get_statement_startend2(lineno, astnode)
- # We need to correct the end:
- # - ast-parsing strips comments
- # - there might be empty lines
- # - we might have lesser indented code blocks at the end
- if end is None:
- end = len(source.lines)
-
- if end > start + 1:
- # Make sure we don't span differently indented code blocks
- # by using the BlockFinder helper used which inspect.getsource() uses itself.
- block_finder = inspect.BlockFinder()
- # If we start with an indented line, put blockfinder to "started" mode.
- block_finder.started = source.lines[start][0].isspace()
- it = ((x + "\n") for x in source.lines[start:end])
- try:
- for tok in tokenize.generate_tokens(lambda: next(it)):
- block_finder.tokeneater(*tok)
- except (inspect.EndOfBlock, IndentationError):
- end = block_finder.last + start
- except Exception:
- pass
-
- # The end might still point to a comment or empty line, correct it.
- while end:
- line = source.lines[end - 1].lstrip()
- if line.startswith("#") or not line:
- end -= 1
- else:
- break
- return astnode, start, end
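A small sketch of the Source helper deleted above, assuming standalone use of the internal API; the function text is illustrative:

    from _pytest._code.source import Source

    src = Source(
        "def f(x):\n"
        "    return (x +\n"
        "            1)"
    )
    print(len(src))                    # 3 lines
    # getstatement() takes a 0-based line number and returns the whole
    # statement spanning it, here the two-line "return" expression.
    print(str(src.getstatement(2)))
    print(str(src.indent()))           # same lines, each indented by four spaces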
diff --git a/contrib/python/pytest/py3/_pytest/_io/__init__.py b/contrib/python/pytest/py3/_pytest/_io/__init__.py
deleted file mode 100644
index db001e918c..0000000000
--- a/contrib/python/pytest/py3/_pytest/_io/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .terminalwriter import get_terminal_width
-from .terminalwriter import TerminalWriter
-
-
-__all__ = [
- "TerminalWriter",
- "get_terminal_width",
-]
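The TerminalWriter and get_terminal_width re-exported here are what code.py above writes through (tw.line, tw.sep, markup keywords such as bold= and red=). A minimal sketch, assuming standalone use of the internal API; the messages are illustrative:

    from _pytest._io import TerminalWriter, get_terminal_width

    print(get_terminal_width())            # falls back to 80 when detection looks bogus
    tw = TerminalWriter()                  # defaults to sys.stdout
    tw.sep("=", "session starts")
    tw.line("collected 3 items", bold=True)
    tw.line("FAILED test_example.py::test_answer", red=True)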
diff --git a/contrib/python/pytest/py3/_pytest/_io/saferepr.py b/contrib/python/pytest/py3/_pytest/_io/saferepr.py
deleted file mode 100644
index e7ff5cab20..0000000000
--- a/contrib/python/pytest/py3/_pytest/_io/saferepr.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import pprint
-import reprlib
-from typing import Any
-from typing import Dict
-from typing import IO
-from typing import Optional
-
-
-def _try_repr_or_str(obj: object) -> str:
- try:
- return repr(obj)
- except (KeyboardInterrupt, SystemExit):
- raise
- except BaseException:
- return f'{type(obj).__name__}("{obj}")'
-
-
-def _format_repr_exception(exc: BaseException, obj: object) -> str:
- try:
- exc_info = _try_repr_or_str(exc)
- except (KeyboardInterrupt, SystemExit):
- raise
- except BaseException as exc:
- exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})"
- return "<[{} raised in repr()] {} object at 0x{:x}>".format(
- exc_info, type(obj).__name__, id(obj)
- )
-
-
-def _ellipsize(s: str, maxsize: int) -> str:
- if len(s) > maxsize:
- i = max(0, (maxsize - 3) // 2)
- j = max(0, maxsize - 3 - i)
- return s[:i] + "..." + s[len(s) - j :]
- return s
-
-
-class SafeRepr(reprlib.Repr):
- """
- repr.Repr that limits the resulting size of repr() and includes
- information on exceptions raised during the call.
- """
-
- def __init__(self, maxsize: Optional[int]) -> None:
- """
- :param maxsize:
- If not None, will truncate the resulting repr to that specific size, using ellipsis
- somewhere in the middle to hide the extra text.
- If None, will not impose any size limits on the returning repr.
- """
- super().__init__()
- # ``maxstring`` is used by the superclass, and needs to be an int; using a
- # very large number in case maxsize is None, meaning we want to disable
- # truncation.
- self.maxstring = maxsize if maxsize is not None else 1_000_000_000
- self.maxsize = maxsize
-
- def repr(self, x: object) -> str:
- try:
- s = super().repr(x)
- except (KeyboardInterrupt, SystemExit):
- raise
- except BaseException as exc:
- s = _format_repr_exception(exc, x)
- if self.maxsize is not None:
- s = _ellipsize(s, self.maxsize)
- return s
-
- def repr_instance(self, x: object, level: int) -> str:
- try:
- s = repr(x)
- except (KeyboardInterrupt, SystemExit):
- raise
- except BaseException as exc:
- s = _format_repr_exception(exc, x)
- if self.maxsize is not None:
- s = _ellipsize(s, self.maxsize)
- return s
-
-
-def safeformat(obj: object) -> str:
- """Return a pretty printed string for the given object.
-
- Failing __repr__ functions of user instances will be represented
- with a short exception info.
- """
- try:
- return pprint.pformat(obj)
- except Exception as exc:
- return _format_repr_exception(exc, obj)
-
-
-# Maximum size of overall repr of objects to display during assertion errors.
-DEFAULT_REPR_MAX_SIZE = 240
-
-
-def saferepr(obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE) -> str:
- """Return a size-limited safe repr-string for the given object.
-
- Failing __repr__ functions of user instances will be represented
- with a short exception info and 'saferepr' generally takes
- care to never raise exceptions itself.
-
- This function is a wrapper around the Repr/reprlib functionality of the
- stdlib.
- """
- return SafeRepr(maxsize).repr(obj)
-
-
-class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter):
- """PrettyPrinter that always dispatches (regardless of width)."""
-
- def _format(
- self,
- object: object,
- stream: IO[str],
- indent: int,
- allowance: int,
- context: Dict[int, Any],
- level: int,
- ) -> None:
- # Type ignored because _dispatch is private.
- p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined]
-
- objid = id(object)
- if objid in context or p is None:
- # Type ignored because _format is private.
- super()._format( # type: ignore[misc]
- object,
- stream,
- indent,
- allowance,
- context,
- level,
- )
- return
-
- context[objid] = 1
- p(self, object, stream, indent, allowance, context, level + 1)
- del context[objid]
-
-
-def _pformat_dispatch(
- object: object,
- indent: int = 1,
- width: int = 80,
- depth: Optional[int] = None,
- *,
- compact: bool = False,
-) -> str:
- return AlwaysDispatchingPrettyPrinter(
- indent=indent, width=width, depth=depth, compact=compact
- ).pformat(object)
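The module above provides repr() helpers with two guarantees: an object whose __repr__ raises never propagates the exception, and the result can be capped at maxsize with an ellipsis in the middle. A small usage sketch, assuming a pytest installation that still ships the private _pytest._io.saferepr module (the Broken class is illustrative):

from _pytest._io.saferepr import safeformat, saferepr

class Broken:
    def __repr__(self) -> str:
        raise RuntimeError("boom")

# A failing __repr__ is reported inline instead of raising:
print(saferepr(Broken()))
# e.g. <[RuntimeError('boom') raised in repr()] Broken object at 0x7f...>

# Long reprs are cut to roughly maxsize characters, with "..." in the middle:
print(saferepr("x" * 1000, maxsize=40))

# safeformat() pretty-prints without any size limit:
print(len(safeformat(list(range(1000)))) > 40)  # True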
diff --git a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py b/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py
deleted file mode 100644
index 379035d858..0000000000
--- a/contrib/python/pytest/py3/_pytest/_io/terminalwriter.py
+++ /dev/null
@@ -1,233 +0,0 @@
-"""Helper functions for writing to terminals and files."""
-import os
-import shutil
-import sys
-from typing import Optional
-from typing import Sequence
-from typing import TextIO
-
-from .wcwidth import wcswidth
-from _pytest.compat import final
-
-
-# This code was initially copied from py 1.8.1, file _io/terminalwriter.py.
-
-
-def get_terminal_width() -> int:
- width, _ = shutil.get_terminal_size(fallback=(80, 24))
-
- # The Windows get_terminal_size may be bogus, so sanity-check it a bit.
- if width < 40:
- width = 80
-
- return width
-
-
-def should_do_markup(file: TextIO) -> bool:
- if os.environ.get("PY_COLORS") == "1":
- return True
- if os.environ.get("PY_COLORS") == "0":
- return False
- if "NO_COLOR" in os.environ:
- return False
- if "FORCE_COLOR" in os.environ:
- return True
- return (
- hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb"
- )
-
-
-@final
-class TerminalWriter:
- _esctable = dict(
- black=30,
- red=31,
- green=32,
- yellow=33,
- blue=34,
- purple=35,
- cyan=36,
- white=37,
- Black=40,
- Red=41,
- Green=42,
- Yellow=43,
- Blue=44,
- Purple=45,
- Cyan=46,
- White=47,
- bold=1,
- light=2,
- blink=5,
- invert=7,
- )
-
- def __init__(self, file: Optional[TextIO] = None) -> None:
- if file is None:
- file = sys.stdout
- if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32":
- try:
- import colorama
- except ImportError:
- pass
- else:
- file = colorama.AnsiToWin32(file).stream
- assert file is not None
- self._file = file
- self.hasmarkup = should_do_markup(file)
- self._current_line = ""
- self._terminal_width: Optional[int] = None
- self.code_highlight = True
-
- @property
- def fullwidth(self) -> int:
- if self._terminal_width is not None:
- return self._terminal_width
- return get_terminal_width()
-
- @fullwidth.setter
- def fullwidth(self, value: int) -> None:
- self._terminal_width = value
-
- @property
- def width_of_current_line(self) -> int:
- """Return an estimate of the width so far in the current line."""
- return wcswidth(self._current_line)
-
- def markup(self, text: str, **markup: bool) -> str:
- for name in markup:
- if name not in self._esctable:
- raise ValueError(f"unknown markup: {name!r}")
- if self.hasmarkup:
- esc = [self._esctable[name] for name, on in markup.items() if on]
- if esc:
- text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m"
- return text
-
- def sep(
- self,
- sepchar: str,
- title: Optional[str] = None,
- fullwidth: Optional[int] = None,
- **markup: bool,
- ) -> None:
- if fullwidth is None:
- fullwidth = self.fullwidth
- # The goal is to have the line be as long as possible
- # under the condition that len(line) <= fullwidth.
- if sys.platform == "win32":
- # If we print in the last column on windows we are on a
- # new line but there is no way to verify/neutralize this
- # (we may not know the exact line width).
- # So let's be defensive to avoid empty lines in the output.
- fullwidth -= 1
- if title is not None:
- # we want 2 + 2*len(fill) + len(title) <= fullwidth
- # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
- # 2*len(sepchar)*N <= fullwidth - len(title) - 2
- # N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
- N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
- fill = sepchar * N
- line = f"{fill} {title} {fill}"
- else:
- # we want len(sepchar)*N <= fullwidth
- # i.e. N <= fullwidth // len(sepchar)
- line = sepchar * (fullwidth // len(sepchar))
- # In some situations there is room for an extra sepchar at the right,
- # in particular if we consider that with a sepchar like "_ " the
- # trailing space is not important at the end of the line.
- if len(line) + len(sepchar.rstrip()) <= fullwidth:
- line += sepchar.rstrip()
-
- self.line(line, **markup)
-
- def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None:
- if msg:
- current_line = msg.rsplit("\n", 1)[-1]
- if "\n" in msg:
- self._current_line = current_line
- else:
- self._current_line += current_line
-
- msg = self.markup(msg, **markup)
-
- try:
- self._file.write(msg)
- except UnicodeEncodeError:
- # Some environments don't support printing general Unicode
- # strings, due to misconfiguration or otherwise; in that case,
- # print the string escaped to ASCII.
- # When the Unicode situation improves we should consider
- # letting the error propagate instead of masking it (see #7475
- # for one brief attempt).
- msg = msg.encode("unicode-escape").decode("ascii")
- self._file.write(msg)
-
- if flush:
- self.flush()
-
- def line(self, s: str = "", **markup: bool) -> None:
- self.write(s, **markup)
- self.write("\n")
-
- def flush(self) -> None:
- self._file.flush()
-
- def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None:
- """Write lines of source code possibly highlighted.
-
- Keeping this private for now because the API is clunky. We should discuss how
- to evolve the terminal writer so we can have more precise color support, for example
- being able to write part of a line in one color and the rest in another, and so on.
- """
- if indents and len(indents) != len(lines):
- raise ValueError(
- "indents size ({}) should have same size as lines ({})".format(
- len(indents), len(lines)
- )
- )
- if not indents:
- indents = [""] * len(lines)
- source = "\n".join(lines)
- new_lines = self._highlight(source).splitlines()
- for indent, new_line in zip(indents, new_lines):
- self.line(indent + new_line)
-
- def _highlight(self, source: str) -> str:
- """Highlight the given source code if we have markup support."""
- from _pytest.config.exceptions import UsageError
-
- if not self.hasmarkup or not self.code_highlight:
- return source
- try:
- from pygments.formatters.terminal import TerminalFormatter
- from pygments.lexers.python import PythonLexer
- from pygments import highlight
- import pygments.util
- except ImportError:
- return source
- else:
- try:
- highlighted: str = highlight(
- source,
- PythonLexer(),
- TerminalFormatter(
- bg=os.getenv("PYTEST_THEME_MODE", "dark"),
- style=os.getenv("PYTEST_THEME"),
- ),
- )
- return highlighted
- except pygments.util.ClassNotFound:
- raise UsageError(
- "PYTEST_THEME environment variable had an invalid value: '{}'. "
- "Only valid pygment styles are allowed.".format(
- os.getenv("PYTEST_THEME")
- )
- )
- except pygments.util.OptionError:
- raise UsageError(
- "PYTEST_THEME_MODE environment variable had an invalid value: '{}'. "
- "The only allowed values are 'dark' and 'light'.".format(
- os.getenv("PYTEST_THEME_MODE")
- )
- )
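should_do_markup() above resolves colour support in a fixed order: PY_COLORS=1/0 wins, then NO_COLOR and FORCE_COLOR, and only then the isatty()/TERM check. A usage sketch for the writer itself, assuming the private module is importable; the buffer and the forced settings are illustrative:

import io

from _pytest._io.terminalwriter import TerminalWriter

buf = io.StringIO()
tw = TerminalWriter(file=buf)
tw.hasmarkup = True   # force ANSI escape codes even though buf is not a tty
tw.fullwidth = 40     # pretend the terminal is 40 columns wide

tw.sep("=", "session starts", bold=True)   # centred title padded with "="
tw.line("collected 3 items", green=True)

print(buf.getvalue())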
diff --git a/contrib/python/pytest/py3/_pytest/_io/wcwidth.py b/contrib/python/pytest/py3/_pytest/_io/wcwidth.py
deleted file mode 100644
index e5c7bf4d86..0000000000
--- a/contrib/python/pytest/py3/_pytest/_io/wcwidth.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import unicodedata
-from functools import lru_cache
-
-
-@lru_cache(100)
-def wcwidth(c: str) -> int:
- """Determine how many columns are needed to display a character in a terminal.
-
- Returns -1 if the character is not printable.
- Returns 0, 1 or 2 for other characters.
- """
- o = ord(c)
-
- # ASCII fast path.
- if 0x20 <= o < 0x07F:
- return 1
-
- # Some Cf/Zp/Zl characters which should be zero-width.
- if (
- o == 0x0000
- or 0x200B <= o <= 0x200F
- or 0x2028 <= o <= 0x202E
- or 0x2060 <= o <= 0x2063
- ):
- return 0
-
- category = unicodedata.category(c)
-
- # Control characters.
- if category == "Cc":
- return -1
-
- # Combining characters with zero width.
- if category in ("Me", "Mn"):
- return 0
-
- # Full/Wide east asian characters.
- if unicodedata.east_asian_width(c) in ("F", "W"):
- return 2
-
- return 1
-
-
-def wcswidth(s: str) -> int:
- """Determine how many columns are needed to display a string in a terminal.
-
- Returns -1 if the string contains non-printable characters.
- """
- width = 0
- for c in unicodedata.normalize("NFC", s):
- wc = wcwidth(c)
- if wc < 0:
- return -1
- width += wc
- return width
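wcwidth()/wcswidth() above estimate how many terminal columns a string occupies: most characters count 1, combining marks 0, wide East Asian characters 2, and a control character makes the whole string unprintable (-1). A few illustrative checks, assuming the private module is importable:

from _pytest._io.wcwidth import wcswidth, wcwidth

print(wcswidth("hello"))    # 5 -- one column per ASCII character
print(wcswidth("日本語"))    # 6 -- wide East Asian characters take two columns
print(wcswidth("e\u0301"))  # 1 -- NFC-normalised to a single accented character
print(wcwidth("\t"))        # -1 -- control characters are not printable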
diff --git a/contrib/python/pytest/py3/_pytest/_version.py b/contrib/python/pytest/py3/_pytest/_version.py
deleted file mode 100644
index 90941616ba..0000000000
--- a/contrib/python/pytest/py3/_pytest/_version.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# coding: utf-8
-# file generated by setuptools_scm
-# don't change, don't track in version control
-version = '7.1.2'
-version_tuple = (7, 1, 2)
diff --git a/contrib/python/pytest/py3/_pytest/assertion/__init__.py b/contrib/python/pytest/py3/_pytest/assertion/__init__.py
deleted file mode 100644
index 480a26ad86..0000000000
--- a/contrib/python/pytest/py3/_pytest/assertion/__init__.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""Support for presenting detailed information in failing assertions."""
-import sys
-from typing import Any
-from typing import Generator
-from typing import List
-from typing import Optional
-from typing import TYPE_CHECKING
-
-from _pytest.assertion import rewrite
-from _pytest.assertion import truncate
-from _pytest.assertion import util
-from _pytest.assertion.rewrite import assertstate_key
-from _pytest.config import Config
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.nodes import Item
-
-if TYPE_CHECKING:
- from _pytest.main import Session
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("debugconfig")
- group.addoption(
- "--assert",
- action="store",
- dest="assertmode",
- choices=("rewrite", "plain"),
- default="rewrite",
- metavar="MODE",
- help=(
- "Control assertion debugging tools.\n"
- "'plain' performs no assertion debugging.\n"
- "'rewrite' (the default) rewrites assert statements in test modules"
- " on import to provide assert expression information."
- ),
- )
- parser.addini(
- "enable_assertion_pass_hook",
- type="bool",
- default=False,
- help="Enables the pytest_assertion_pass hook."
- "Make sure to delete any previously generated pyc cache files.",
- )
-
-
-def register_assert_rewrite(*names: str) -> None:
- """Register one or more module names to be rewritten on import.
-
- This function will make sure that this module or all modules inside
- the package will get their assert statements rewritten.
- Thus you should make sure to call this before the module is
- actually imported, usually in your __init__.py if you are a plugin
- using a package.
-
- :raises TypeError: If the given module names are not strings.
- """
- for name in names:
- if not isinstance(name, str):
- msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable]
- raise TypeError(msg.format(repr(names)))
- for hook in sys.meta_path:
- if isinstance(hook, rewrite.AssertionRewritingHook):
- importhook = hook
- break
- else:
- # TODO(typing): Add a protocol for mark_rewrite() and use it
- # for importhook and for PytestPluginManager.rewrite_hook.
- importhook = DummyRewriteHook() # type: ignore
- importhook.mark_rewrite(*names)
-
-
-class DummyRewriteHook:
- """A no-op import hook for when rewriting is disabled."""
-
- def mark_rewrite(self, *names: str) -> None:
- pass
-
-
-class AssertionState:
- """State for the assertion plugin."""
-
- def __init__(self, config: Config, mode) -> None:
- self.mode = mode
- self.trace = config.trace.root.get("assertion")
- self.hook: Optional[rewrite.AssertionRewritingHook] = None
-
-
-def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
- """Try to install the rewrite hook, raise SystemError if it fails."""
- config.stash[assertstate_key] = AssertionState(config, "rewrite")
- config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
- sys.meta_path.insert(0, hook)
- config.stash[assertstate_key].trace("installed rewrite import hook")
-
- def undo() -> None:
- hook = config.stash[assertstate_key].hook
- if hook is not None and hook in sys.meta_path:
- sys.meta_path.remove(hook)
-
- config.add_cleanup(undo)
- return hook
-
-
-def pytest_collection(session: "Session") -> None:
- # This hook is only called when test modules are collected
- # so for example not in the managing process of pytest-xdist
- # (which does not collect test modules).
- assertstate = session.config.stash.get(assertstate_key, None)
- if assertstate:
- if assertstate.hook is not None:
- assertstate.hook.set_session(session)
-
-
-@hookimpl(tryfirst=True, hookwrapper=True)
-def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
- """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks.
-
- The rewrite module will use util._reprcompare if it exists to use custom
- reporting via the pytest_assertrepr_compare hook. This sets up this custom
- comparison for the test.
- """
-
- ihook = item.ihook
-
- def callbinrepr(op, left: object, right: object) -> Optional[str]:
- """Call the pytest_assertrepr_compare hook and prepare the result.
-
- This uses the first result from the hook and then ensures the
- following:
- * Overly verbose explanations are truncated unless configured otherwise
- (eg. if running in verbose mode).
- * Embedded newlines are escaped to help util.format_explanation()
- later.
- * If the rewrite mode is used embedded %-characters are replaced
- to protect later % formatting.
-
- The result can be formatted by util.format_explanation() for
- pretty printing.
- """
- hook_result = ihook.pytest_assertrepr_compare(
- config=item.config, op=op, left=left, right=right
- )
- for new_expl in hook_result:
- if new_expl:
- new_expl = truncate.truncate_if_required(new_expl, item)
- new_expl = [line.replace("\n", "\\n") for line in new_expl]
- res = "\n~".join(new_expl)
- if item.config.getvalue("assertmode") == "rewrite":
- res = res.replace("%", "%%")
- return res
- return None
-
- saved_assert_hooks = util._reprcompare, util._assertion_pass
- util._reprcompare = callbinrepr
- util._config = item.config
-
- if ihook.pytest_assertion_pass.get_hookimpls():
-
- def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
- ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)
-
- util._assertion_pass = call_assertion_pass_hook
-
- yield
-
- util._reprcompare, util._assertion_pass = saved_assert_hooks
- util._config = None
-
-
-def pytest_sessionfinish(session: "Session") -> None:
- assertstate = session.config.stash.get(assertstate_key, None)
- if assertstate:
- if assertstate.hook is not None:
- assertstate.hook.set_session(None)
-
-
-def pytest_assertrepr_compare(
- config: Config, op: str, left: Any, right: Any
-) -> Optional[List[str]]:
- return util.assertrepr_compare(config=config, op=op, left=left, right=right)
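register_assert_rewrite() above is the public entry point plugins use to opt their own submodules into assertion rewriting; it must run before those submodules are imported. A sketch of how a plugin package would call it, where the "myplugin.*" names are hypothetical:

import pytest

# Typically placed in the plugin package's __init__.py, before the helper
# submodules are imported anywhere:
pytest.register_assert_rewrite("myplugin.helpers", "myplugin.checks")

# Non-string names are rejected, matching the TypeError documented above:
try:
    pytest.register_assert_rewrite(123)  # type: ignore[arg-type]
except TypeError as exc:
    print(exc)  # expected module names as *args, got (123,) instead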
diff --git a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py b/contrib/python/pytest/py3/_pytest/assertion/rewrite.py
deleted file mode 100644
index 81096764e0..0000000000
--- a/contrib/python/pytest/py3/_pytest/assertion/rewrite.py
+++ /dev/null
@@ -1,1129 +0,0 @@
-"""Rewrite assertion AST to produce nice error messages."""
-import ast
-import errno
-import functools
-import importlib.abc
-import importlib.machinery
-import importlib.util
-import io
-import itertools
-import marshal
-import os
-import struct
-import sys
-import tokenize
-import types
-from pathlib import Path
-from pathlib import PurePath
-from typing import Callable
-from typing import Dict
-from typing import IO
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE
-from _pytest._io.saferepr import saferepr
-from _pytest._version import version
-from _pytest.assertion import util
-from _pytest.assertion.util import ( # noqa: F401
- format_explanation as _format_explanation,
-)
-from _pytest.config import Config
-from _pytest.main import Session
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import fnmatch_ex
-from _pytest.stash import StashKey
-
-if TYPE_CHECKING:
- from _pytest.assertion import AssertionState
-
-
-assertstate_key = StashKey["AssertionState"]()
-
-
-# pytest caches rewritten pycs in pycache dirs
-PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
-PYC_EXT = ".py" + (__debug__ and "c" or "o")
-PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
-
-
-class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader):
- """PEP302/PEP451 import hook which rewrites asserts."""
-
- def __init__(self, config: Config) -> None:
- self.config = config
- try:
- self.fnpats = config.getini("python_files")
- except ValueError:
- self.fnpats = ["test_*.py", "*_test.py"]
- self.session: Optional[Session] = None
- self._rewritten_names: Dict[str, Path] = {}
- self._must_rewrite: Set[str] = set()
- # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
- # which might result in infinite recursion (#3506)
- self._writing_pyc = False
- self._basenames_to_check_rewrite = {"conftest"}
- self._marked_for_rewrite_cache: Dict[str, bool] = {}
- self._session_paths_checked = False
-
- def set_session(self, session: Optional[Session]) -> None:
- self.session = session
- self._session_paths_checked = False
-
- # Indirection so we can mock calls to find_spec originated from the hook during testing
- _find_spec = importlib.machinery.PathFinder.find_spec
-
- def find_spec(
- self,
- name: str,
- path: Optional[Sequence[Union[str, bytes]]] = None,
- target: Optional[types.ModuleType] = None,
- ) -> Optional[importlib.machinery.ModuleSpec]:
- if self._writing_pyc:
- return None
- state = self.config.stash[assertstate_key]
- if self._early_rewrite_bailout(name, state):
- return None
- state.trace("find_module called for: %s" % name)
-
- # Type ignored because mypy is confused about the `self` binding here.
- spec = self._find_spec(name, path) # type: ignore
- if (
- # the import machinery could not find a file to import
- spec is None
- # this is a namespace package (without `__init__.py`)
- # there's nothing to rewrite there
- or spec.origin is None
- # we can only rewrite source files
- or not isinstance(spec.loader, importlib.machinery.SourceFileLoader)
- # if the file doesn't exist, we can't rewrite it
- or not os.path.exists(spec.origin)
- ):
- return None
- else:
- fn = spec.origin
-
- if not self._should_rewrite(name, fn, state):
- return None
-
- return importlib.util.spec_from_file_location(
- name,
- fn,
- loader=self,
- submodule_search_locations=spec.submodule_search_locations,
- )
-
- def create_module(
- self, spec: importlib.machinery.ModuleSpec
- ) -> Optional[types.ModuleType]:
- return None # default behaviour is fine
-
- def exec_module(self, module: types.ModuleType) -> None:
- assert module.__spec__ is not None
- assert module.__spec__.origin is not None
- fn = Path(module.__spec__.origin)
- state = self.config.stash[assertstate_key]
-
- self._rewritten_names[module.__name__] = fn
-
- # The requested module looks like a test file, so rewrite it. This is
- # the most magical part of the process: load the source, rewrite the
- # asserts, and load the rewritten source. We also cache the rewritten
- # module code in a special pyc. We must be aware of the possibility of
- # concurrent pytest processes rewriting and loading pycs. To avoid
- # tricky race conditions, we maintain the following invariant: The
- # cached pyc is always a complete, valid pyc. Operations on it must be
- # atomic. POSIX's atomic rename comes in handy.
- write = not sys.dont_write_bytecode
- cache_dir = get_cache_dir(fn)
- if write:
- ok = try_makedirs(cache_dir)
- if not ok:
- write = False
- state.trace(f"read only directory: {cache_dir}")
-
- cache_name = fn.name[:-3] + PYC_TAIL
- pyc = cache_dir / cache_name
- # Notice that even if we're in a read-only directory, I'm going
- # to check for a cached pyc. This may not be optimal...
- co = _read_pyc(fn, pyc, state.trace)
- if co is None:
- state.trace(f"rewriting {fn!r}")
- source_stat, co = _rewrite_test(fn, self.config)
- if write:
- self._writing_pyc = True
- try:
- _write_pyc(state, co, source_stat, pyc)
- finally:
- self._writing_pyc = False
- else:
- state.trace(f"found cached rewritten pyc for {fn}")
- exec(co, module.__dict__)
-
- def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool:
- """A fast way to get out of rewriting modules.
-
- Profiling has shown that the call to PathFinder.find_spec (inside of
- the find_spec from this class) is a major slowdown, so, this method
- tries to filter what we're sure won't be rewritten before getting to
- it.
- """
- if self.session is not None and not self._session_paths_checked:
- self._session_paths_checked = True
- for initial_path in self.session._initialpaths:
- # Make something as c:/projects/my_project/path.py ->
- # ['c:', 'projects', 'my_project', 'path.py']
- parts = str(initial_path).split(os.path.sep)
- # add 'path' to basenames to be checked.
- self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0])
-
- # Note: conftest is already in _basenames_to_check_rewrite by default.
- parts = name.split(".")
- if parts[-1] in self._basenames_to_check_rewrite:
- return False
-
- # For matching the name it must be as if it was a filename.
- path = PurePath(os.path.sep.join(parts) + ".py")
-
- for pat in self.fnpats:
- # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
- # on the name alone because we need to match against the full path
- if os.path.dirname(pat):
- return False
- if fnmatch_ex(pat, path):
- return False
-
- if self._is_marked_for_rewrite(name, state):
- return False
-
- state.trace(f"early skip of rewriting module: {name}")
- return True
-
- def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool:
- # always rewrite conftest files
- if os.path.basename(fn) == "conftest.py":
- state.trace(f"rewriting conftest file: {fn!r}")
- return True
-
- if self.session is not None:
- if self.session.isinitpath(absolutepath(fn)):
- state.trace(f"matched test file (was specified on cmdline): {fn!r}")
- return True
-
- # modules not passed explicitly on the command line are only
- # rewritten if they match the naming convention for test files
- fn_path = PurePath(fn)
- for pat in self.fnpats:
- if fnmatch_ex(pat, fn_path):
- state.trace(f"matched test file {fn!r}")
- return True
-
- return self._is_marked_for_rewrite(name, state)
-
- def _is_marked_for_rewrite(self, name: str, state: "AssertionState") -> bool:
- try:
- return self._marked_for_rewrite_cache[name]
- except KeyError:
- for marked in self._must_rewrite:
- if name == marked or name.startswith(marked + "."):
- state.trace(f"matched marked file {name!r} (from {marked!r})")
- self._marked_for_rewrite_cache[name] = True
- return True
-
- self._marked_for_rewrite_cache[name] = False
- return False
-
- def mark_rewrite(self, *names: str) -> None:
- """Mark import names as needing to be rewritten.
-
- The named module or package as well as any nested modules will
- be rewritten on import.
- """
- already_imported = (
- set(names).intersection(sys.modules).difference(self._rewritten_names)
- )
- for name in already_imported:
- mod = sys.modules[name]
- if not AssertionRewriter.is_rewrite_disabled(
- mod.__doc__ or ""
- ) and not isinstance(mod.__loader__, type(self)):
- self._warn_already_imported(name)
- self._must_rewrite.update(names)
- self._marked_for_rewrite_cache.clear()
-
- def _warn_already_imported(self, name: str) -> None:
- from _pytest.warning_types import PytestAssertRewriteWarning
-
- self.config.issue_config_time_warning(
- PytestAssertRewriteWarning(
- "Module already imported so cannot be rewritten: %s" % name
- ),
- stacklevel=5,
- )
-
- def get_data(self, pathname: Union[str, bytes]) -> bytes:
- """Optional PEP302 get_data API."""
- with open(pathname, "rb") as f:
- return f.read()
-
- if sys.version_info >= (3, 10):
-
- def get_resource_reader(self, name: str) -> importlib.abc.TraversableResources: # type: ignore
- if sys.version_info < (3, 11):
- from importlib.readers import FileReader
- else:
- from importlib.resources.readers import FileReader
-
- return FileReader(types.SimpleNamespace(path=self._rewritten_names[name]))
-
-
-def _write_pyc_fp(
- fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType
-) -> None:
- # Technically, we don't have to have the same pyc format as
- # (C)Python, since these "pycs" should never be seen by builtin
- # import. However, there's little reason to deviate.
- fp.write(importlib.util.MAGIC_NUMBER)
- # https://www.python.org/dev/peps/pep-0552/
- flags = b"\x00\x00\x00\x00"
- fp.write(flags)
- # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
- mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
- size = source_stat.st_size & 0xFFFFFFFF
- # "<LL" stands for 2 unsigned longs, little-endian.
- fp.write(struct.pack("<LL", mtime, size))
- fp.write(marshal.dumps(co))
-
-
-if sys.platform == "win32":
- from atomicwrites import atomic_write
-
- def _write_pyc(
- state: "AssertionState",
- co: types.CodeType,
- source_stat: os.stat_result,
- pyc: Path,
- ) -> bool:
- try:
- with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
- _write_pyc_fp(fp, source_stat, co)
- except OSError as e:
- state.trace(f"error writing pyc file at {pyc}: {e}")
- # we ignore any failure to write the cache file
- # there are many reasons, permission-denied, pycache dir being a
- # file etc.
- return False
- return True
-
-else:
-
- def _write_pyc(
- state: "AssertionState",
- co: types.CodeType,
- source_stat: os.stat_result,
- pyc: Path,
- ) -> bool:
- proc_pyc = f"{pyc}.{os.getpid()}"
- try:
- fp = open(proc_pyc, "wb")
- except OSError as e:
- state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
- return False
-
- try:
- _write_pyc_fp(fp, source_stat, co)
- os.rename(proc_pyc, pyc)
- except OSError as e:
- state.trace(f"error writing pyc file at {pyc}: {e}")
- # we ignore any failure to write the cache file
- # there are many reasons, permission-denied, pycache dir being a
- # file etc.
- return False
- finally:
- fp.close()
- return True
-
-
-def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
- """Read and rewrite *fn* and return the code object."""
- stat = os.stat(fn)
- source = fn.read_bytes()
- strfn = str(fn)
- tree = ast.parse(source, filename=strfn)
- rewrite_asserts(tree, source, strfn, config)
- co = compile(tree, strfn, "exec", dont_inherit=True)
- return stat, co
-
-
-def _read_pyc(
- source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
-) -> Optional[types.CodeType]:
- """Possibly read a pytest pyc containing rewritten code.
-
- Return rewritten code if successful or None if not.
- """
- try:
- fp = open(pyc, "rb")
- except OSError:
- return None
- with fp:
- try:
- stat_result = os.stat(source)
- mtime = int(stat_result.st_mtime)
- size = stat_result.st_size
- data = fp.read(16)
- except OSError as e:
- trace(f"_read_pyc({source}): OSError {e}")
- return None
- # Check for invalid or out of date pyc file.
- if len(data) != (16):
- trace("_read_pyc(%s): invalid pyc (too short)" % source)
- return None
- if data[:4] != importlib.util.MAGIC_NUMBER:
- trace("_read_pyc(%s): invalid pyc (bad magic number)" % source)
- return None
- if data[4:8] != b"\x00\x00\x00\x00":
- trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source)
- return None
- mtime_data = data[8:12]
- if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
- trace("_read_pyc(%s): out of date" % source)
- return None
- size_data = data[12:16]
- if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
- trace("_read_pyc(%s): invalid pyc (incorrect size)" % source)
- return None
- try:
- co = marshal.load(fp)
- except Exception as e:
- trace(f"_read_pyc({source}): marshal.load error {e}")
- return None
- if not isinstance(co, types.CodeType):
- trace("_read_pyc(%s): not a code object" % source)
- return None
- return co
-
-
-def rewrite_asserts(
- mod: ast.Module,
- source: bytes,
- module_path: Optional[str] = None,
- config: Optional[Config] = None,
-) -> None:
- """Rewrite the assert statements in mod."""
- AssertionRewriter(module_path, config, source).run(mod)
-
-
-def _saferepr(obj: object) -> str:
- r"""Get a safe repr of an object for assertion error messages.
-
- The assertion formatting (util.format_explanation()) requires
- newlines to be escaped since they are a special character for it.
- Normally assertion.util.format_explanation() does this, but a
- custom repr may itself contain one of the special escape
- sequences; in particular '\n{' and '\n}' are likely to be present
- in JSON reprs.
- """
- maxsize = _get_maxsize_for_saferepr(util._config)
- return saferepr(obj, maxsize=maxsize).replace("\n", "\\n")
-
-
-def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]:
- """Get `maxsize` configuration for saferepr based on the given config object."""
- verbosity = config.getoption("verbose") if config is not None else 0
- if verbosity >= 2:
- return None
- if verbosity >= 1:
- return DEFAULT_REPR_MAX_SIZE * 10
- return DEFAULT_REPR_MAX_SIZE
-
-
-def _format_assertmsg(obj: object) -> str:
- r"""Format the custom assertion message given.
-
- For strings this simply replaces newlines with '\n~' so that
- util.format_explanation() will preserve them instead of escaping
- newlines. For other objects saferepr() is used first.
- """
- # reprlib appears to have a bug which means that if a string
- # contains a newline it gets escaped, however if an object has a
- # .__repr__() which contains newlines it does not get escaped.
- # However in either case we want to preserve the newline.
- replaces = [("\n", "\n~"), ("%", "%%")]
- if not isinstance(obj, str):
- obj = saferepr(obj)
- replaces.append(("\\n", "\n~"))
-
- for r1, r2 in replaces:
- obj = obj.replace(r1, r2)
-
- return obj
-
-
-def _should_repr_global_name(obj: object) -> bool:
- if callable(obj):
- return False
-
- try:
- return not hasattr(obj, "__name__")
- except Exception:
- return True
-
-
-def _format_boolop(explanations: Iterable[str], is_or: bool) -> str:
- explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
- return explanation.replace("%", "%%")
-
-
-def _call_reprcompare(
- ops: Sequence[str],
- results: Sequence[bool],
- expls: Sequence[str],
- each_obj: Sequence[object],
-) -> str:
- for i, res, expl in zip(range(len(ops)), results, expls):
- try:
- done = not res
- except Exception:
- done = True
- if done:
- break
- if util._reprcompare is not None:
- custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
- if custom is not None:
- return custom
- return expl
-
-
-def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None:
- if util._assertion_pass is not None:
- util._assertion_pass(lineno, orig, expl)
-
-
-def _check_if_assertion_pass_impl() -> bool:
- """Check if any plugins implement the pytest_assertion_pass hook
- in order not to generate explanation unnecessarily (might be expensive)."""
- return True if util._assertion_pass else False
-
-
-UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
-
-BINOP_MAP = {
- ast.BitOr: "|",
- ast.BitXor: "^",
- ast.BitAnd: "&",
- ast.LShift: "<<",
- ast.RShift: ">>",
- ast.Add: "+",
- ast.Sub: "-",
- ast.Mult: "*",
- ast.Div: "/",
- ast.FloorDiv: "//",
- ast.Mod: "%%", # escaped for string formatting
- ast.Eq: "==",
- ast.NotEq: "!=",
- ast.Lt: "<",
- ast.LtE: "<=",
- ast.Gt: ">",
- ast.GtE: ">=",
- ast.Pow: "**",
- ast.Is: "is",
- ast.IsNot: "is not",
- ast.In: "in",
- ast.NotIn: "not in",
- ast.MatMult: "@",
-}
-
-
-def traverse_node(node: ast.AST) -> Iterator[ast.AST]:
- """Recursively yield node and all its children in depth-first order."""
- yield node
- for child in ast.iter_child_nodes(node):
- yield from traverse_node(child)
-
-
-@functools.lru_cache(maxsize=1)
-def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
- """Return a mapping from {lineno: "assertion test expression"}."""
- ret: Dict[int, str] = {}
-
- depth = 0
- lines: List[str] = []
- assert_lineno: Optional[int] = None
- seen_lines: Set[int] = set()
-
- def _write_and_reset() -> None:
- nonlocal depth, lines, assert_lineno, seen_lines
- assert assert_lineno is not None
- ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\")
- depth = 0
- lines = []
- assert_lineno = None
- seen_lines = set()
-
- tokens = tokenize.tokenize(io.BytesIO(src).readline)
- for tp, source, (lineno, offset), _, line in tokens:
- if tp == tokenize.NAME and source == "assert":
- assert_lineno = lineno
- elif assert_lineno is not None:
- # keep track of depth for the assert-message `,` lookup
- if tp == tokenize.OP and source in "([{":
- depth += 1
- elif tp == tokenize.OP and source in ")]}":
- depth -= 1
-
- if not lines:
- lines.append(line[offset:])
- seen_lines.add(lineno)
- # a non-nested comma separates the expression from the message
- elif depth == 0 and tp == tokenize.OP and source == ",":
- # one line assert with message
- if lineno in seen_lines and len(lines) == 1:
- offset_in_trimmed = offset + len(lines[-1]) - len(line)
- lines[-1] = lines[-1][:offset_in_trimmed]
- # multi-line assert with message
- elif lineno in seen_lines:
- lines[-1] = lines[-1][:offset]
- # multi-line assert with escaped newline before message
- else:
- lines.append(line[:offset])
- _write_and_reset()
- elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}:
- _write_and_reset()
- elif lines and lineno not in seen_lines:
- lines.append(line)
- seen_lines.add(lineno)
-
- return ret
-
-
-class AssertionRewriter(ast.NodeVisitor):
- """Assertion rewriting implementation.
-
- The main entrypoint is to call .run() with an ast.Module instance,
- this will then find all the assert statements and rewrite them to
- provide intermediate values and a detailed assertion error. See
- http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
- for an overview of how this works.
-
- The entry point here is .run() which will iterate over all the
- statements in an ast.Module and for each ast.Assert statement it
- finds call .visit() with it. Then .visit_Assert() takes over and
- is responsible for creating new ast statements to replace the
- original assert statement: it rewrites the test of an assertion
- to provide intermediate values and replaces it with an if statement
- which raises an assertion error with a detailed explanation in
- case the expression is false, and calls the pytest_assertion_pass
- hook if the expression is true.
-
- For this .visit_Assert() uses the visitor pattern to visit all the
- AST nodes of the ast.Assert.test field, each visit call returning
- an AST node and the corresponding explanation string. While this
- happens, state is kept in several instance attributes:
-
- :statements: All the AST statements which will replace the assert
- statement.
-
- :variables: This is populated by .variable() with each variable
- used by the statements so that they can all be set to None at
- the end of the statements.
-
- :variable_counter: Counter to create new unique variables needed
- by statements. Variables are created using .variable() and
- have the form of "@py_assert0".
-
- :expl_stmts: The AST statements which will be executed to get
- data from the assertion. This is the code which will construct
- the detailed assertion message that is used in the AssertionError
- or for the pytest_assertion_pass hook.
-
- :explanation_specifiers: A dict filled by .explanation_param()
- with %-formatting placeholders and their corresponding
- expressions to use in the building of an assertion message.
- This is used by .pop_format_context() to build a message.
-
- :stack: A stack of the explanation_specifiers dicts maintained by
- .push_format_context() and .pop_format_context(), which allows
- building another %-formatted string while one is already being built.
-
- This state is reset on every new assert statement visited and used
- by the other visitors.
- """
-
- def __init__(
- self, module_path: Optional[str], config: Optional[Config], source: bytes
- ) -> None:
- super().__init__()
- self.module_path = module_path
- self.config = config
- if config is not None:
- self.enable_assertion_pass_hook = config.getini(
- "enable_assertion_pass_hook"
- )
- else:
- self.enable_assertion_pass_hook = False
- self.source = source
-
- def run(self, mod: ast.Module) -> None:
- """Find all assert statements in *mod* and rewrite them."""
- if not mod.body:
- # Nothing to do.
- return
-
- # We'll insert some special imports at the top of the module, but after any
- # docstrings and __future__ imports, so first figure out where that is.
- doc = getattr(mod, "docstring", None)
- expect_docstring = doc is None
- if doc is not None and self.is_rewrite_disabled(doc):
- return
- pos = 0
- lineno = 1
- for item in mod.body:
- if (
- expect_docstring
- and isinstance(item, ast.Expr)
- and isinstance(item.value, ast.Str)
- ):
- doc = item.value.s
- if self.is_rewrite_disabled(doc):
- return
- expect_docstring = False
- elif (
- isinstance(item, ast.ImportFrom)
- and item.level == 0
- and item.module == "__future__"
- ):
- pass
- else:
- break
- pos += 1
- # Special case: for a decorated function, set the lineno to that of the
- # first decorator, not the `def`. Issue #4984.
- if isinstance(item, ast.FunctionDef) and item.decorator_list:
- lineno = item.decorator_list[0].lineno
- else:
- lineno = item.lineno
- # Now actually insert the special imports.
- if sys.version_info >= (3, 10):
- aliases = [
- ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0),
- ast.alias(
- "_pytest.assertion.rewrite",
- "@pytest_ar",
- lineno=lineno,
- col_offset=0,
- ),
- ]
- else:
- aliases = [
- ast.alias("builtins", "@py_builtins"),
- ast.alias("_pytest.assertion.rewrite", "@pytest_ar"),
- ]
- imports = [
- ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
- ]
- mod.body[pos:pos] = imports
-
- # Collect asserts.
- nodes: List[ast.AST] = [mod]
- while nodes:
- node = nodes.pop()
- for name, field in ast.iter_fields(node):
- if isinstance(field, list):
- new: List[ast.AST] = []
- for i, child in enumerate(field):
- if isinstance(child, ast.Assert):
- # Transform assert.
- new.extend(self.visit(child))
- else:
- new.append(child)
- if isinstance(child, ast.AST):
- nodes.append(child)
- setattr(node, name, new)
- elif (
- isinstance(field, ast.AST)
- # Don't recurse into expressions as they can't contain
- # asserts.
- and not isinstance(field, ast.expr)
- ):
- nodes.append(field)
-
- @staticmethod
- def is_rewrite_disabled(docstring: str) -> bool:
- return "PYTEST_DONT_REWRITE" in docstring
-
- def variable(self) -> str:
- """Get a new variable."""
- # Use a character invalid in python identifiers to avoid clashing.
- name = "@py_assert" + str(next(self.variable_counter))
- self.variables.append(name)
- return name
-
- def assign(self, expr: ast.expr) -> ast.Name:
- """Give *expr* a name."""
- name = self.variable()
- self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
- return ast.Name(name, ast.Load())
-
- def display(self, expr: ast.expr) -> ast.expr:
- """Call saferepr on the expression."""
- return self.helper("_saferepr", expr)
-
- def helper(self, name: str, *args: ast.expr) -> ast.expr:
- """Call a helper in this module."""
- py_name = ast.Name("@pytest_ar", ast.Load())
- attr = ast.Attribute(py_name, name, ast.Load())
- return ast.Call(attr, list(args), [])
-
- def builtin(self, name: str) -> ast.Attribute:
- """Return the builtin called *name*."""
- builtin_name = ast.Name("@py_builtins", ast.Load())
- return ast.Attribute(builtin_name, name, ast.Load())
-
- def explanation_param(self, expr: ast.expr) -> str:
- """Return a new named %-formatting placeholder for expr.
-
- This creates a %-formatting placeholder for expr in the
- current formatting context, e.g. ``%(py0)s``. The placeholder
- and expr are placed in the current format context so that it
- can be used on the next call to .pop_format_context().
- """
- specifier = "py" + str(next(self.variable_counter))
- self.explanation_specifiers[specifier] = expr
- return "%(" + specifier + ")s"
-
- def push_format_context(self) -> None:
- """Create a new formatting context.
-
- The format context is used for when an explanation wants to
- have a variable value formatted in the assertion message. In
- this case the value required can be added using
- .explanation_param(). Finally .pop_format_context() is used
- to format a string of %-formatted values as added by
- .explanation_param().
- """
- self.explanation_specifiers: Dict[str, ast.expr] = {}
- self.stack.append(self.explanation_specifiers)
-
- def pop_format_context(self, expl_expr: ast.expr) -> ast.Name:
- """Format the %-formatted string with current format context.
-
- The expl_expr should be an str ast.expr instance constructed from
- the %-placeholders created by .explanation_param(). This will
- add the required code to format said string to .expl_stmts and
- return the ast.Name instance of the formatted string.
- """
- current = self.stack.pop()
- if self.stack:
- self.explanation_specifiers = self.stack[-1]
- keys = [ast.Str(key) for key in current.keys()]
- format_dict = ast.Dict(keys, list(current.values()))
- form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
- name = "@py_format" + str(next(self.variable_counter))
- if self.enable_assertion_pass_hook:
- self.format_variables.append(name)
- self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form))
- return ast.Name(name, ast.Load())
-
- def generic_visit(self, node: ast.AST) -> Tuple[ast.Name, str]:
- """Handle expressions we don't have custom code for."""
- assert isinstance(node, ast.expr)
- res = self.assign(node)
- return res, self.explanation_param(self.display(res))
-
- def visit_Assert(self, assert_: ast.Assert) -> List[ast.stmt]:
- """Return the AST statements to replace the ast.Assert instance.
-
- This rewrites the test of an assertion to provide
- intermediate values and replace it with an if statement which
- raises an assertion error with a detailed explanation in case
- the expression is false.
- """
- if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
- from _pytest.warning_types import PytestAssertRewriteWarning
- import warnings
-
- # TODO: This assert should not be needed.
- assert self.module_path is not None
- warnings.warn_explicit(
- PytestAssertRewriteWarning(
- "assertion is always true, perhaps remove parentheses?"
- ),
- category=None,
- filename=self.module_path,
- lineno=assert_.lineno,
- )
-
- self.statements: List[ast.stmt] = []
- self.variables: List[str] = []
- self.variable_counter = itertools.count()
-
- if self.enable_assertion_pass_hook:
- self.format_variables: List[str] = []
-
- self.stack: List[Dict[str, ast.expr]] = []
- self.expl_stmts: List[ast.stmt] = []
- self.push_format_context()
- # Rewrite assert into a bunch of statements.
- top_condition, explanation = self.visit(assert_.test)
-
- negation = ast.UnaryOp(ast.Not(), top_condition)
-
- if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook
- msg = self.pop_format_context(ast.Str(explanation))
-
- # Failed
- if assert_.msg:
- assertmsg = self.helper("_format_assertmsg", assert_.msg)
- gluestr = "\n>assert "
- else:
- assertmsg = ast.Str("")
- gluestr = "assert "
- err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg)
- err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation)
- err_name = ast.Name("AssertionError", ast.Load())
- fmt = self.helper("_format_explanation", err_msg)
- exc = ast.Call(err_name, [fmt], [])
- raise_ = ast.Raise(exc, None)
- statements_fail = []
- statements_fail.extend(self.expl_stmts)
- statements_fail.append(raise_)
-
- # Passed
- fmt_pass = self.helper("_format_explanation", msg)
- orig = _get_assertion_exprs(self.source)[assert_.lineno]
- hook_call_pass = ast.Expr(
- self.helper(
- "_call_assertion_pass",
- ast.Num(assert_.lineno),
- ast.Str(orig),
- fmt_pass,
- )
- )
- # If any hooks implement assert_pass hook
- hook_impl_test = ast.If(
- self.helper("_check_if_assertion_pass_impl"),
- self.expl_stmts + [hook_call_pass],
- [],
- )
- statements_pass = [hook_impl_test]
-
- # Test for assertion condition
- main_test = ast.If(negation, statements_fail, statements_pass)
- self.statements.append(main_test)
- if self.format_variables:
- variables = [
- ast.Name(name, ast.Store()) for name in self.format_variables
- ]
- clear_format = ast.Assign(variables, ast.NameConstant(None))
- self.statements.append(clear_format)
-
- else: # Original assertion rewriting
- # Create failure message.
- body = self.expl_stmts
- self.statements.append(ast.If(negation, body, []))
- if assert_.msg:
- assertmsg = self.helper("_format_assertmsg", assert_.msg)
- explanation = "\n>assert " + explanation
- else:
- assertmsg = ast.Str("")
- explanation = "assert " + explanation
- template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
- msg = self.pop_format_context(template)
- fmt = self.helper("_format_explanation", msg)
- err_name = ast.Name("AssertionError", ast.Load())
- exc = ast.Call(err_name, [fmt], [])
- raise_ = ast.Raise(exc, None)
-
- body.append(raise_)
-
- # Clear temporary variables by setting them to None.
- if self.variables:
- variables = [ast.Name(name, ast.Store()) for name in self.variables]
- clear = ast.Assign(variables, ast.NameConstant(None))
- self.statements.append(clear)
- # Fix locations (line numbers/column offsets).
- for stmt in self.statements:
- for node in traverse_node(stmt):
- ast.copy_location(node, assert_)
- return self.statements
-
- def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]:
- # Display the repr of the name if it's a local variable or
- # _should_repr_global_name() thinks it's acceptable.
- locs = ast.Call(self.builtin("locals"), [], [])
- inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
- dorepr = self.helper("_should_repr_global_name", name)
- test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
- expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
- return name, self.explanation_param(expr)
-
- def visit_BoolOp(self, boolop: ast.BoolOp) -> Tuple[ast.Name, str]:
- res_var = self.variable()
- expl_list = self.assign(ast.List([], ast.Load()))
- app = ast.Attribute(expl_list, "append", ast.Load())
- is_or = int(isinstance(boolop.op, ast.Or))
- body = save = self.statements
- fail_save = self.expl_stmts
- levels = len(boolop.values) - 1
- self.push_format_context()
- # Process each operand, short-circuiting if needed.
- for i, v in enumerate(boolop.values):
- if i:
- fail_inner: List[ast.stmt] = []
- # cond is set in a prior loop iteration below
- self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa
- self.expl_stmts = fail_inner
- self.push_format_context()
- res, expl = self.visit(v)
- body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
- expl_format = self.pop_format_context(ast.Str(expl))
- call = ast.Call(app, [expl_format], [])
- self.expl_stmts.append(ast.Expr(call))
- if i < levels:
- cond: ast.expr = res
- if is_or:
- cond = ast.UnaryOp(ast.Not(), cond)
- inner: List[ast.stmt] = []
- self.statements.append(ast.If(cond, inner, []))
- self.statements = body = inner
- self.statements = save
- self.expl_stmts = fail_save
- expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or))
- expl = self.pop_format_context(expl_template)
- return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
-
- def visit_UnaryOp(self, unary: ast.UnaryOp) -> Tuple[ast.Name, str]:
- pattern = UNARY_MAP[unary.op.__class__]
- operand_res, operand_expl = self.visit(unary.operand)
- res = self.assign(ast.UnaryOp(unary.op, operand_res))
- return res, pattern % (operand_expl,)
-
- def visit_BinOp(self, binop: ast.BinOp) -> Tuple[ast.Name, str]:
- symbol = BINOP_MAP[binop.op.__class__]
- left_expr, left_expl = self.visit(binop.left)
- right_expr, right_expl = self.visit(binop.right)
- explanation = f"({left_expl} {symbol} {right_expl})"
- res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
- return res, explanation
-
- def visit_Call(self, call: ast.Call) -> Tuple[ast.Name, str]:
- new_func, func_expl = self.visit(call.func)
- arg_expls = []
- new_args = []
- new_kwargs = []
- for arg in call.args:
- res, expl = self.visit(arg)
- arg_expls.append(expl)
- new_args.append(res)
- for keyword in call.keywords:
- res, expl = self.visit(keyword.value)
- new_kwargs.append(ast.keyword(keyword.arg, res))
- if keyword.arg:
- arg_expls.append(keyword.arg + "=" + expl)
- else: # **args have `arg` keywords with an .arg of None
- arg_expls.append("**" + expl)
-
- expl = "{}({})".format(func_expl, ", ".join(arg_expls))
- new_call = ast.Call(new_func, new_args, new_kwargs)
- res = self.assign(new_call)
- res_expl = self.explanation_param(self.display(res))
- outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}"
- return res, outer_expl
-
- def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]:
- # A Starred node can appear in a function call.
- res, expl = self.visit(starred.value)
- new_starred = ast.Starred(res, starred.ctx)
- return new_starred, "*" + expl
-
- def visit_Attribute(self, attr: ast.Attribute) -> Tuple[ast.Name, str]:
- if not isinstance(attr.ctx, ast.Load):
- return self.generic_visit(attr)
- value, value_expl = self.visit(attr.value)
- res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
- res_expl = self.explanation_param(self.display(res))
- pat = "%s\n{%s = %s.%s\n}"
- expl = pat % (res_expl, res_expl, value_expl, attr.attr)
- return res, expl
-
- def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]:
- self.push_format_context()
- left_res, left_expl = self.visit(comp.left)
- if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
- left_expl = f"({left_expl})"
- res_variables = [self.variable() for i in range(len(comp.ops))]
- load_names = [ast.Name(v, ast.Load()) for v in res_variables]
- store_names = [ast.Name(v, ast.Store()) for v in res_variables]
- it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
- expls = []
- syms = []
- results = [left_res]
- for i, op, next_operand in it:
- next_res, next_expl = self.visit(next_operand)
- if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
- next_expl = f"({next_expl})"
- results.append(next_res)
- sym = BINOP_MAP[op.__class__]
- syms.append(ast.Str(sym))
- expl = f"{left_expl} {sym} {next_expl}"
- expls.append(ast.Str(expl))
- res_expr = ast.Compare(left_res, [op], [next_res])
- self.statements.append(ast.Assign([store_names[i]], res_expr))
- left_res, left_expl = next_res, next_expl
- # Use pytest.assertion.util._reprcompare if that's available.
- expl_call = self.helper(
- "_call_reprcompare",
- ast.Tuple(syms, ast.Load()),
- ast.Tuple(load_names, ast.Load()),
- ast.Tuple(expls, ast.Load()),
- ast.Tuple(results, ast.Load()),
- )
- if len(comp.ops) > 1:
- res: ast.expr = ast.BoolOp(ast.And(), load_names)
- else:
- res = load_names[0]
- return res, self.explanation_param(self.pop_format_context(expl_call))
-
-
-def try_makedirs(cache_dir: Path) -> bool:
- """Attempt to create the given directory and sub-directories exist.
-
- Returns True if successful or if it already exists.
- """
- try:
- os.makedirs(cache_dir, exist_ok=True)
- except (FileNotFoundError, NotADirectoryError, FileExistsError):
- # One of the path components was not a directory:
- # - we're in a zip file
- # - it is a file
- return False
- except PermissionError:
- return False
- except OSError as e:
- # as of now, EROFS doesn't have an equivalent OSError-subclass
- if e.errno == errno.EROFS:
- return False
- raise
- return True
-
-
-def get_cache_dir(file_path: Path) -> Path:
- """Return the cache directory to write .pyc files for the given .py file path."""
- if sys.version_info >= (3, 8) and sys.pycache_prefix:
- # given:
- # prefix = '/tmp/pycs'
- # path = '/home/user/proj/test_app.py'
- # we want:
- # '/tmp/pycs/home/user/proj'
- return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1])
- else:
- # classic pycache directory
- return file_path.parent / "__pycache__"
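Two details of the caching scheme above are worth pinning down: where the rewritten pyc is stored, and what its header contains. A sketch, assuming the private names are importable; the source path is hypothetical:

from pathlib import Path

from _pytest.assertion.rewrite import PYC_TAIL, get_cache_dir

source = Path("/home/user/proj/test_app.py")

# Without sys.pycache_prefix the pyc lives next to the source in __pycache__;
# with a prefix set, the source path is mirrored underneath that prefix.
print(get_cache_dir(source))        # /home/user/proj/__pycache__ (no prefix set)
print(source.name[:-3] + PYC_TAIL)  # e.g. test_app.cpython-310-pytest-7.1.2.pyc

# Header layout written by _write_pyc_fp and verified by _read_pyc:
#   bytes  0-3   importlib.util.MAGIC_NUMBER
#   bytes  4-7   PEP 552 flags (all zero: a timestamp-based pyc)
#   bytes  8-11  source mtime, little-endian, truncated to 32 bits
#   bytes 12-15  source size,  little-endian, truncated to 32 bits
#   rest         marshal.dumps() of the rewritten code object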
diff --git a/contrib/python/pytest/py3/_pytest/assertion/truncate.py b/contrib/python/pytest/py3/_pytest/assertion/truncate.py
deleted file mode 100644
index ce148dca09..0000000000
--- a/contrib/python/pytest/py3/_pytest/assertion/truncate.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Utilities for truncating assertion output.
-
-Current default behaviour is to truncate assertion explanations at
-~8 terminal lines, unless running in "-vv" mode or running on CI.
-"""
-from typing import List
-from typing import Optional
-
-from _pytest.assertion import util
-from _pytest.nodes import Item
-
-
-DEFAULT_MAX_LINES = 8
-DEFAULT_MAX_CHARS = 8 * 80
-USAGE_MSG = "use '-vv' to show"
-
-
-def truncate_if_required(
- explanation: List[str], item: Item, max_length: Optional[int] = None
-) -> List[str]:
- """Truncate this assertion explanation if the given test item is eligible."""
- if _should_truncate_item(item):
- return _truncate_explanation(explanation)
- return explanation
-
-
-def _should_truncate_item(item: Item) -> bool:
- """Whether or not this test item is eligible for truncation."""
- verbose = item.config.option.verbose
- return verbose < 2 and not util.running_on_ci()
-
-
-def _truncate_explanation(
- input_lines: List[str],
- max_lines: Optional[int] = None,
- max_chars: Optional[int] = None,
-) -> List[str]:
- """Truncate given list of strings that makes up the assertion explanation.
-
- Truncates to either 8 lines, or 640 characters - whichever the input reaches
- first. The remaining lines will be replaced by a usage message.
- """
-
- if max_lines is None:
- max_lines = DEFAULT_MAX_LINES
- if max_chars is None:
- max_chars = DEFAULT_MAX_CHARS
-
- # Check if truncation required
- input_char_count = len("".join(input_lines))
- if len(input_lines) <= max_lines and input_char_count <= max_chars:
- return input_lines
-
- # Truncate first to max_lines, and then truncate to max_chars if max_chars
- # is exceeded.
- truncated_explanation = input_lines[:max_lines]
- truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)
-
- # Add ellipsis to final line
- truncated_explanation[-1] = truncated_explanation[-1] + "..."
-
- # Append useful message to explanation
- truncated_line_count = len(input_lines) - len(truncated_explanation)
- truncated_line_count += 1 # Account for the part-truncated final line
- msg = "...Full output truncated"
- if truncated_line_count == 1:
- msg += f" ({truncated_line_count} line hidden)"
- else:
- msg += f" ({truncated_line_count} lines hidden)"
- msg += f", {USAGE_MSG}"
- truncated_explanation.extend(["", str(msg)])
- return truncated_explanation
-
-
-def _truncate_by_char_count(input_lines: List[str], max_chars: int) -> List[str]:
- # Check if truncation required
- if len("".join(input_lines)) <= max_chars:
- return input_lines
-
- # Find point at which input length exceeds total allowed length
- iterated_char_count = 0
- for iterated_index, input_line in enumerate(input_lines):
- if iterated_char_count + len(input_line) > max_chars:
- break
- iterated_char_count += len(input_line)
-
- # Create truncated explanation with modified final line
- truncated_result = input_lines[:iterated_index]
- final_line = input_lines[iterated_index]
- if final_line:
- final_line_truncate_point = max_chars - iterated_char_count
- final_line = final_line[:final_line_truncate_point]
- truncated_result.append(final_line)
- return truncated_result
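A rough usage sketch for the truncation helper deleted above; _truncate_explanation is a private pytest helper, so this is illustrative only. A 20-line explanation is cut at 8 lines, the last kept line gets an ellipsis, and a hint to rerun with -vv is appended:

from _pytest.assertion.truncate import _truncate_explanation  # private helper

lines = [f"assert detail {i}" for i in range(20)]
short = _truncate_explanation(lines, max_lines=8, max_chars=640)

assert short[7].endswith("...")          # last kept line is part-truncated
assert "use '-vv' to show" in short[-1]  # appended usage message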
diff --git a/contrib/python/pytest/py3/_pytest/assertion/util.py b/contrib/python/pytest/py3/_pytest/assertion/util.py
deleted file mode 100644
index b1f168767b..0000000000
--- a/contrib/python/pytest/py3/_pytest/assertion/util.py
+++ /dev/null
@@ -1,509 +0,0 @@
-"""Utilities for assertion debugging."""
-import collections.abc
-import os
-import pprint
-from typing import AbstractSet
-from typing import Any
-from typing import Callable
-from typing import Iterable
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-
-import _pytest._code
-from _pytest import outcomes
-from _pytest._io.saferepr import _pformat_dispatch
-from _pytest._io.saferepr import safeformat
-from _pytest._io.saferepr import saferepr
-from _pytest.config import Config
-
-# The _reprcompare attribute on the util module is used by the new assertion
-# interpretation code and assertion rewriter to detect this plugin was
-# loaded and in turn call the hooks defined here as part of the
-# DebugInterpreter.
-_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None
-
-# Works similarly as _reprcompare attribute. Is populated with the hook call
-# when pytest_runtest_setup is called.
-_assertion_pass: Optional[Callable[[int, str, str], None]] = None
-
-# Config object which is assigned during pytest_runtest_protocol.
-_config: Optional[Config] = None
-
-
-def format_explanation(explanation: str) -> str:
- r"""Format an explanation.
-
- Normally all embedded newlines are escaped, however there are
- three exceptions: \n{, \n} and \n~. The first two are intended to
- cover nested explanations, see function and attribute explanations
- for examples (.visit_Call(), visit_Attribute()). The last one is
- for when one explanation needs to span multiple lines, e.g. when
- displaying diffs.
- """
- lines = _split_explanation(explanation)
- result = _format_lines(lines)
- return "\n".join(result)
-
-
-def _split_explanation(explanation: str) -> List[str]:
- r"""Return a list of individual lines in the explanation.
-
- This will return a list of lines split on '\n{', '\n}' and '\n~'.
- Any other newlines will be escaped and appear in the line as the
- literal '\n' characters.
- """
- raw_lines = (explanation or "").split("\n")
- lines = [raw_lines[0]]
- for values in raw_lines[1:]:
- if values and values[0] in ["{", "}", "~", ">"]:
- lines.append(values)
- else:
- lines[-1] += "\\n" + values
- return lines
-
-
-def _format_lines(lines: Sequence[str]) -> List[str]:
- """Format the individual lines.
-
- This will replace the '{', '}' and '~' characters of our mini formatting
- language with the proper 'where ...', 'and ...' and ' + ...' text, taking
- care of indentation along the way.
-
- Return a list of formatted lines.
- """
- result = list(lines[:1])
- stack = [0]
- stackcnt = [0]
- for line in lines[1:]:
- if line.startswith("{"):
- if stackcnt[-1]:
- s = "and "
- else:
- s = "where "
- stack.append(len(result))
- stackcnt[-1] += 1
- stackcnt.append(0)
- result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
- elif line.startswith("}"):
- stack.pop()
- stackcnt.pop()
- result[stack[-1]] += line[1:]
- else:
- assert line[0] in ["~", ">"]
- stack[-1] += 1
- indent = len(stack) if line.startswith("~") else len(stack) - 1
- result.append(" " * indent + line[1:])
- assert len(stack) == 1
- return result
-
-
-def issequence(x: Any) -> bool:
- return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
-
-
-def istext(x: Any) -> bool:
- return isinstance(x, str)
-
-
-def isdict(x: Any) -> bool:
- return isinstance(x, dict)
-
-
-def isset(x: Any) -> bool:
- return isinstance(x, (set, frozenset))
-
-
-def isnamedtuple(obj: Any) -> bool:
- return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
-
-
-def isdatacls(obj: Any) -> bool:
- return getattr(obj, "__dataclass_fields__", None) is not None
-
-
-def isattrs(obj: Any) -> bool:
- return getattr(obj, "__attrs_attrs__", None) is not None
-
-
-def isiterable(obj: Any) -> bool:
- try:
- iter(obj)
- return not istext(obj)
- except TypeError:
- return False
-
-
-def has_default_eq(
- obj: object,
-) -> bool:
- """Check if an instance of an object contains the default eq
-
- First, we check if the object's __eq__ attribute has __code__,
- if so, we check the equally of the method code filename (__code__.co_filename)
- to the default one generated by the dataclass and attr module
- for dataclasses the default co_filename is <string>, for attrs class, the __eq__ should contain "attrs eq generated"
- """
- # inspired by https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
- if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
- code_filename = obj.__eq__.__code__.co_filename
-
- if isattrs(obj):
- return "attrs generated eq" in code_filename
-
- return code_filename == "<string>" # data class
- return True
-
-
-def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[str]]:
- """Return specialised explanations for some operators/operands."""
- verbose = config.getoption("verbose")
- if verbose > 1:
- left_repr = safeformat(left)
- right_repr = safeformat(right)
- else:
- # XXX: "15 chars indentation" is wrong
- # ("E AssertionError: assert "); should use term width.
- maxsize = (
- 80 - 15 - len(op) - 2
- ) // 2 # 15 chars indentation, 1 space around op
- left_repr = saferepr(left, maxsize=maxsize)
- right_repr = saferepr(right, maxsize=maxsize)
-
- summary = f"{left_repr} {op} {right_repr}"
-
- explanation = None
- try:
- if op == "==":
- explanation = _compare_eq_any(left, right, verbose)
- elif op == "not in":
- if istext(left) and istext(right):
- explanation = _notin_text(left, right, verbose)
- except outcomes.Exit:
- raise
- except Exception:
- explanation = [
- "(pytest_assertion plugin: representation of details failed: {}.".format(
- _pytest._code.ExceptionInfo.from_current()._getreprcrash()
- ),
- " Probably an object has a faulty __repr__.)",
- ]
-
- if not explanation:
- return None
-
- return [summary] + explanation
-
-
-def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
- explanation = []
- if istext(left) and istext(right):
- explanation = _diff_text(left, right, verbose)
- else:
- from _pytest.python_api import ApproxBase
-
- if isinstance(left, ApproxBase) or isinstance(right, ApproxBase):
- # The usual call order is obtained == expected, but handle approx on either side.
- approx_side = left if isinstance(left, ApproxBase) else right
- other_side = right if isinstance(left, ApproxBase) else left
-
- explanation = approx_side._repr_compare(other_side)
- elif type(left) == type(right) and (
- isdatacls(left) or isattrs(left) or isnamedtuple(left)
- ):
- # Note: unlike dataclasses/attrs, namedtuples compare only the
- # field values, not the type or field names. But this branch
- # intentionally only handles the same-type case, which was often
- # used in older code bases before dataclasses/attrs were available.
- explanation = _compare_eq_cls(left, right, verbose)
- elif issequence(left) and issequence(right):
- explanation = _compare_eq_sequence(left, right, verbose)
- elif isset(left) and isset(right):
- explanation = _compare_eq_set(left, right, verbose)
- elif isdict(left) and isdict(right):
- explanation = _compare_eq_dict(left, right, verbose)
-
- if isiterable(left) and isiterable(right):
- expl = _compare_eq_iterable(left, right, verbose)
- explanation.extend(expl)
-
- return explanation
-
-
-def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
- """Return the explanation for the diff between text.
-
- Unless --verbose is used this will skip leading and trailing
- characters which are identical to keep the diff minimal.
- """
- from difflib import ndiff
-
- explanation: List[str] = []
-
- if verbose < 1:
- i = 0 # just in case left or right has zero length
- for i in range(min(len(left), len(right))):
- if left[i] != right[i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation = [
- "Skipping %s identical leading characters in diff, use -v to show" % i
- ]
- left = left[i:]
- right = right[i:]
- if len(left) == len(right):
- for i in range(len(left)):
- if left[-i] != right[-i]:
- break
- if i > 42:
- i -= 10 # Provide some context
- explanation += [
- "Skipping {} identical trailing "
- "characters in diff, use -v to show".format(i)
- ]
- left = left[:-i]
- right = right[:-i]
- keepends = True
- if left.isspace() or right.isspace():
- left = repr(str(left))
- right = repr(str(right))
- explanation += ["Strings contain only whitespace, escaping them using repr()"]
- # "right" is the expected base against which we compare "left",
- # see https://github.com/pytest-dev/pytest/issues/3333
- explanation += [
- line.strip("\n")
- for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
- ]
- return explanation
-
-
-def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
- """Move opening/closing parenthesis/bracket to own lines."""
- opening = lines[0][:1]
- if opening in ["(", "[", "{"]:
- lines[0] = " " + lines[0][1:]
- lines[:] = [opening] + lines
- closing = lines[-1][-1:]
- if closing in [")", "]", "}"]:
- lines[-1] = lines[-1][:-1] + ","
- lines[:] = lines + [closing]
-
-
-def _compare_eq_iterable(
- left: Iterable[Any], right: Iterable[Any], verbose: int = 0
-) -> List[str]:
- if verbose <= 0 and not running_on_ci():
- return ["Use -v to get more diff"]
- # dynamic import to speedup pytest
- import difflib
-
- left_formatting = pprint.pformat(left).splitlines()
- right_formatting = pprint.pformat(right).splitlines()
-
- # Re-format for different output lengths.
- lines_left = len(left_formatting)
- lines_right = len(right_formatting)
- if lines_left != lines_right:
- left_formatting = _pformat_dispatch(left).splitlines()
- right_formatting = _pformat_dispatch(right).splitlines()
-
- if lines_left > 1 or lines_right > 1:
- _surrounding_parens_on_own_lines(left_formatting)
- _surrounding_parens_on_own_lines(right_formatting)
-
- explanation = ["Full diff:"]
- # "right" is the expected base against which we compare "left",
- # see https://github.com/pytest-dev/pytest/issues/3333
- explanation.extend(
- line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting)
- )
- return explanation
-
-
-def _compare_eq_sequence(
- left: Sequence[Any], right: Sequence[Any], verbose: int = 0
-) -> List[str]:
- comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
- explanation: List[str] = []
- len_left = len(left)
- len_right = len(right)
- for i in range(min(len_left, len_right)):
- if left[i] != right[i]:
- if comparing_bytes:
- # when comparing bytes, we want to see their ascii representation
- # instead of their numeric values (#5260)
- # using a slice gives us the ascii representation:
- # >>> s = b'foo'
- # >>> s[0]
- # 102
- # >>> s[0:1]
- # b'f'
- left_value = left[i : i + 1]
- right_value = right[i : i + 1]
- else:
- left_value = left[i]
- right_value = right[i]
-
- explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
- break
-
- if comparing_bytes:
- # when comparing bytes, it doesn't help to show the "sides contain one or more
- # items" longer explanation, so skip it
-
- return explanation
-
- len_diff = len_left - len_right
- if len_diff:
- if len_diff > 0:
- dir_with_more = "Left"
- extra = saferepr(left[len_right])
- else:
- len_diff = 0 - len_diff
- dir_with_more = "Right"
- extra = saferepr(right[len_left])
-
- if len_diff == 1:
- explanation += [f"{dir_with_more} contains one more item: {extra}"]
- else:
- explanation += [
- "%s contains %d more items, first extra item: %s"
- % (dir_with_more, len_diff, extra)
- ]
- return explanation
-
-
-def _compare_eq_set(
- left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
-) -> List[str]:
- explanation = []
- diff_left = left - right
- diff_right = right - left
- if diff_left:
- explanation.append("Extra items in the left set:")
- for item in diff_left:
- explanation.append(saferepr(item))
- if diff_right:
- explanation.append("Extra items in the right set:")
- for item in diff_right:
- explanation.append(saferepr(item))
- return explanation
-
-
-def _compare_eq_dict(
- left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
-) -> List[str]:
- explanation: List[str] = []
- set_left = set(left)
- set_right = set(right)
- common = set_left.intersection(set_right)
- same = {k: left[k] for k in common if left[k] == right[k]}
- if same and verbose < 2:
- explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
- elif same:
- explanation += ["Common items:"]
- explanation += pprint.pformat(same).splitlines()
- diff = {k for k in common if left[k] != right[k]}
- if diff:
- explanation += ["Differing items:"]
- for k in diff:
- explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
- extra_left = set_left - set_right
- len_extra_left = len(extra_left)
- if len_extra_left:
- explanation.append(
- "Left contains %d more item%s:"
- % (len_extra_left, "" if len_extra_left == 1 else "s")
- )
- explanation.extend(
- pprint.pformat({k: left[k] for k in extra_left}).splitlines()
- )
- extra_right = set_right - set_left
- len_extra_right = len(extra_right)
- if len_extra_right:
- explanation.append(
- "Right contains %d more item%s:"
- % (len_extra_right, "" if len_extra_right == 1 else "s")
- )
- explanation.extend(
- pprint.pformat({k: right[k] for k in extra_right}).splitlines()
- )
- return explanation
-
-
-def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
- if not has_default_eq(left):
- return []
- if isdatacls(left):
- import dataclasses
-
- all_fields = dataclasses.fields(left)
- fields_to_check = [info.name for info in all_fields if info.compare]
- elif isattrs(left):
- all_fields = left.__attrs_attrs__
- fields_to_check = [field.name for field in all_fields if getattr(field, "eq")]
- elif isnamedtuple(left):
- fields_to_check = left._fields
- else:
- assert False
-
- indent = " "
- same = []
- diff = []
- for field in fields_to_check:
- if getattr(left, field) == getattr(right, field):
- same.append(field)
- else:
- diff.append(field)
-
- explanation = []
- if same or diff:
- explanation += [""]
- if same and verbose < 2:
- explanation.append("Omitting %s identical items, use -vv to show" % len(same))
- elif same:
- explanation += ["Matching attributes:"]
- explanation += pprint.pformat(same).splitlines()
- if diff:
- explanation += ["Differing attributes:"]
- explanation += pprint.pformat(diff).splitlines()
- for field in diff:
- field_left = getattr(left, field)
- field_right = getattr(right, field)
- explanation += [
- "",
- "Drill down into differing attribute %s:" % field,
- ("%s%s: %r != %r") % (indent, field, field_left, field_right),
- ]
- explanation += [
- indent + line
- for line in _compare_eq_any(field_left, field_right, verbose)
- ]
- return explanation
-
-
-def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]:
- index = text.find(term)
- head = text[:index]
- tail = text[index + len(term) :]
- correct_text = head + tail
- diff = _diff_text(text, correct_text, verbose)
- newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)]
- for line in diff:
- if line.startswith("Skipping"):
- continue
- if line.startswith("- "):
- continue
- if line.startswith("+ "):
- newdiff.append(" " + line[2:])
- else:
- newdiff.append(line)
- return newdiff
-
-
-def running_on_ci() -> bool:
- """Check if we're currently running on a CI system."""
- env_vars = ["CI", "BUILD_NUMBER"]
- return any(var in os.environ for var in env_vars)
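To illustrate the sequence comparison deleted above (again via a private helper, shown only as a sketch): the explanation names the first differing index and then reports which side carries extra items.

from _pytest.assertion.util import _compare_eq_sequence  # private helper

explanation = _compare_eq_sequence([1, 2, 3], [1, 9, 3, 4], verbose=0)
# Expected shape, per the implementation above:
#   ['At index 1 diff: 2 != 9', 'Right contains one more item: 4']
print(explanation)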
diff --git a/contrib/python/pytest/py3/_pytest/cacheprovider.py b/contrib/python/pytest/py3/_pytest/cacheprovider.py
deleted file mode 100644
index 681d02b409..0000000000
--- a/contrib/python/pytest/py3/_pytest/cacheprovider.py
+++ /dev/null
@@ -1,580 +0,0 @@
-"""Implementation of the cache provider."""
-# This plugin was not named "cache" to avoid conflicts with the external
-# pytest-cache version.
-import json
-import os
-from pathlib import Path
-from typing import Dict
-from typing import Generator
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import Set
-from typing import Union
-
-import attr
-
-from .pathlib import resolve_from_str
-from .pathlib import rm_rf
-from .reports import CollectReport
-from _pytest import nodes
-from _pytest._io import TerminalWriter
-from _pytest.compat import final
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import FixtureRequest
-from _pytest.main import Session
-from _pytest.python import Module
-from _pytest.python import Package
-from _pytest.reports import TestReport
-
-
-README_CONTENT = """\
-# pytest cache directory #
-
-This directory contains data from pytest's cache plugin,
-which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
-
-**Do not** commit this to version control.
-
-See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
-"""
-
-CACHEDIR_TAG_CONTENT = b"""\
-Signature: 8a477f597d28d172789f06886806bc55
-# This file is a cache directory tag created by pytest.
-# For information about cache directory tags, see:
-# https://bford.info/cachedir/spec.html
-"""
-
-
-@final
-@attr.s(init=False, auto_attribs=True)
-class Cache:
- _cachedir: Path = attr.ib(repr=False)
- _config: Config = attr.ib(repr=False)
-
- # Sub-directory under cache-dir for directories created by `mkdir()`.
- _CACHE_PREFIX_DIRS = "d"
-
- # Sub-directory under cache-dir for values created by `set()`.
- _CACHE_PREFIX_VALUES = "v"
-
- def __init__(
- self, cachedir: Path, config: Config, *, _ispytest: bool = False
- ) -> None:
- check_ispytest(_ispytest)
- self._cachedir = cachedir
- self._config = config
-
- @classmethod
- def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache":
- """Create the Cache instance for a Config.
-
- :meta private:
- """
- check_ispytest(_ispytest)
- cachedir = cls.cache_dir_from_config(config, _ispytest=True)
- if config.getoption("cacheclear") and cachedir.is_dir():
- cls.clear_cache(cachedir, _ispytest=True)
- return cls(cachedir, config, _ispytest=True)
-
- @classmethod
- def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
- """Clear the sub-directories used to hold cached directories and values.
-
- :meta private:
- """
- check_ispytest(_ispytest)
- for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
- d = cachedir / prefix
- if d.is_dir():
- rm_rf(d)
-
- @staticmethod
- def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
- """Get the path to the cache directory for a Config.
-
- :meta private:
- """
- check_ispytest(_ispytest)
- return resolve_from_str(config.getini("cache_dir"), config.rootpath)
-
- def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
- """Issue a cache warning.
-
- :meta private:
- """
- check_ispytest(_ispytest)
- import warnings
- from _pytest.warning_types import PytestCacheWarning
-
- warnings.warn(
- PytestCacheWarning(fmt.format(**args) if args else fmt),
- self._config.hook,
- stacklevel=3,
- )
-
- def mkdir(self, name: str) -> Path:
- """Return a directory path object with the given name.
-
- If the directory does not yet exist, it will be created. You can use
- it to manage files, e.g. to store/retrieve database dumps across test
- sessions.
-
- .. versionadded:: 7.0
-
- :param name:
- Must be a string not containing a ``/`` separator.
- Make sure the name contains your plugin or application
- identifiers to prevent clashes with other cache users.
- """
- path = Path(name)
- if len(path.parts) > 1:
- raise ValueError("name is not allowed to contain path separators")
- res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
- res.mkdir(exist_ok=True, parents=True)
- return res
-
- def _getvaluepath(self, key: str) -> Path:
- return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
-
- def get(self, key: str, default):
- """Return the cached value for the given key.
-
- If no value was yet cached or the value cannot be read, the specified
- default is returned.
-
- :param key:
- Must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param default:
- The value to return in case of a cache-miss or invalid cache value.
- """
- path = self._getvaluepath(key)
- try:
- with path.open("r") as f:
- return json.load(f)
- except (ValueError, OSError):
- return default
-
- def set(self, key: str, value: object) -> None:
- """Save value for the given key.
-
- :param key:
- Must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param value:
- Must be any combination of basic Python types,
- including nested types like lists of dictionaries.
- """
- path = self._getvaluepath(key)
- try:
- if path.parent.is_dir():
- cache_dir_exists_already = True
- else:
- cache_dir_exists_already = self._cachedir.exists()
- path.parent.mkdir(exist_ok=True, parents=True)
- except OSError:
- self.warn("could not create cache path {path}", path=path, _ispytest=True)
- return
- if not cache_dir_exists_already:
- self._ensure_supporting_files()
- data = json.dumps(value, indent=2)
- try:
- f = path.open("w")
- except OSError:
- self.warn("cache could not write path {path}", path=path, _ispytest=True)
- else:
- with f:
- f.write(data)
-
- def _ensure_supporting_files(self) -> None:
- """Create supporting files in the cache dir that are not really part of the cache."""
- readme_path = self._cachedir / "README.md"
- readme_path.write_text(README_CONTENT)
-
- gitignore_path = self._cachedir.joinpath(".gitignore")
- msg = "# Created by pytest automatically.\n*\n"
- gitignore_path.write_text(msg, encoding="UTF-8")
-
- cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
- cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
-
-
-class LFPluginCollWrapper:
- def __init__(self, lfplugin: "LFPlugin") -> None:
- self.lfplugin = lfplugin
- self._collected_at_least_one_failure = False
-
- @hookimpl(hookwrapper=True)
- def pytest_make_collect_report(self, collector: nodes.Collector):
- if isinstance(collector, Session):
- out = yield
- res: CollectReport = out.get_result()
-
- # Sort any lf-paths to the beginning.
- lf_paths = self.lfplugin._last_failed_paths
-
- res.result = sorted(
- res.result,
- # use a stable sort to prioritize last failed tests
- key=lambda x: x.path in lf_paths,
- reverse=True,
- )
- return
-
- elif isinstance(collector, Module):
- if collector.path in self.lfplugin._last_failed_paths:
- out = yield
- res = out.get_result()
- result = res.result
- lastfailed = self.lfplugin.lastfailed
-
- # Only filter with known failures.
- if not self._collected_at_least_one_failure:
- if not any(x.nodeid in lastfailed for x in result):
- return
- self.lfplugin.config.pluginmanager.register(
- LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
- )
- self._collected_at_least_one_failure = True
-
- session = collector.session
- result[:] = [
- x
- for x in result
- if x.nodeid in lastfailed
- # Include any passed arguments (not trivial to filter).
- or session.isinitpath(x.path)
- # Keep all sub-collectors.
- or isinstance(x, nodes.Collector)
- ]
- return
- yield
-
-
-class LFPluginCollSkipfiles:
- def __init__(self, lfplugin: "LFPlugin") -> None:
- self.lfplugin = lfplugin
-
- @hookimpl
- def pytest_make_collect_report(
- self, collector: nodes.Collector
- ) -> Optional[CollectReport]:
- # Packages are Modules, but _last_failed_paths only contains
- # test-bearing paths and doesn't try to include the paths of their
- # packages, so don't filter them.
- if isinstance(collector, Module) and not isinstance(collector, Package):
- if collector.path not in self.lfplugin._last_failed_paths:
- self.lfplugin._skipped_files += 1
-
- return CollectReport(
- collector.nodeid, "passed", longrepr=None, result=[]
- )
- return None
-
-
-class LFPlugin:
- """Plugin which implements the --lf (run last-failing) option."""
-
- def __init__(self, config: Config) -> None:
- self.config = config
- active_keys = "lf", "failedfirst"
- self.active = any(config.getoption(key) for key in active_keys)
- assert config.cache
- self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {})
- self._previously_failed_count: Optional[int] = None
- self._report_status: Optional[str] = None
- self._skipped_files = 0 # count skipped files during collection due to --lf
-
- if config.getoption("lf"):
- self._last_failed_paths = self.get_last_failed_paths()
- config.pluginmanager.register(
- LFPluginCollWrapper(self), "lfplugin-collwrapper"
- )
-
- def get_last_failed_paths(self) -> Set[Path]:
- """Return a set with all Paths()s of the previously failed nodeids."""
- rootpath = self.config.rootpath
- result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed}
- return {x for x in result if x.exists()}
-
- def pytest_report_collectionfinish(self) -> Optional[str]:
- if self.active and self.config.getoption("verbose") >= 0:
- return "run-last-failure: %s" % self._report_status
- return None
-
- def pytest_runtest_logreport(self, report: TestReport) -> None:
- if (report.when == "call" and report.passed) or report.skipped:
- self.lastfailed.pop(report.nodeid, None)
- elif report.failed:
- self.lastfailed[report.nodeid] = True
-
- def pytest_collectreport(self, report: CollectReport) -> None:
- passed = report.outcome in ("passed", "skipped")
- if passed:
- if report.nodeid in self.lastfailed:
- self.lastfailed.pop(report.nodeid)
- self.lastfailed.update((item.nodeid, True) for item in report.result)
- else:
- self.lastfailed[report.nodeid] = True
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_collection_modifyitems(
- self, config: Config, items: List[nodes.Item]
- ) -> Generator[None, None, None]:
- yield
-
- if not self.active:
- return
-
- if self.lastfailed:
- previously_failed = []
- previously_passed = []
- for item in items:
- if item.nodeid in self.lastfailed:
- previously_failed.append(item)
- else:
- previously_passed.append(item)
- self._previously_failed_count = len(previously_failed)
-
- if not previously_failed:
- # Running a subset of all tests, and all of the recorded
- # failures lie outside of that subset.
- self._report_status = "%d known failures not in selected tests" % (
- len(self.lastfailed),
- )
- else:
- if self.config.getoption("lf"):
- items[:] = previously_failed
- config.hook.pytest_deselected(items=previously_passed)
- else: # --failedfirst
- items[:] = previously_failed + previously_passed
-
- noun = "failure" if self._previously_failed_count == 1 else "failures"
- suffix = " first" if self.config.getoption("failedfirst") else ""
- self._report_status = "rerun previous {count} {noun}{suffix}".format(
- count=self._previously_failed_count, suffix=suffix, noun=noun
- )
-
- if self._skipped_files > 0:
- files_noun = "file" if self._skipped_files == 1 else "files"
- self._report_status += " (skipped {files} {files_noun})".format(
- files=self._skipped_files, files_noun=files_noun
- )
- else:
- self._report_status = "no previously failed tests, "
- if self.config.getoption("last_failed_no_failures") == "none":
- self._report_status += "deselecting all items."
- config.hook.pytest_deselected(items=items[:])
- items[:] = []
- else:
- self._report_status += "not deselecting items."
-
- def pytest_sessionfinish(self, session: Session) -> None:
- config = self.config
- if config.getoption("cacheshow") or hasattr(config, "workerinput"):
- return
-
- assert config.cache is not None
- saved_lastfailed = config.cache.get("cache/lastfailed", {})
- if saved_lastfailed != self.lastfailed:
- config.cache.set("cache/lastfailed", self.lastfailed)
-
-
-class NFPlugin:
- """Plugin which implements the --nf (run new-first) option."""
-
- def __init__(self, config: Config) -> None:
- self.config = config
- self.active = config.option.newfirst
- assert config.cache is not None
- self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_collection_modifyitems(
- self, items: List[nodes.Item]
- ) -> Generator[None, None, None]:
- yield
-
- if self.active:
- new_items: Dict[str, nodes.Item] = {}
- other_items: Dict[str, nodes.Item] = {}
- for item in items:
- if item.nodeid not in self.cached_nodeids:
- new_items[item.nodeid] = item
- else:
- other_items[item.nodeid] = item
-
- items[:] = self._get_increasing_order(
- new_items.values()
- ) + self._get_increasing_order(other_items.values())
- self.cached_nodeids.update(new_items)
- else:
- self.cached_nodeids.update(item.nodeid for item in items)
-
- def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]:
- return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return]
-
- def pytest_sessionfinish(self) -> None:
- config = self.config
- if config.getoption("cacheshow") or hasattr(config, "workerinput"):
- return
-
- if config.getoption("collectonly"):
- return
-
- assert config.cache is not None
- config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group.addoption(
- "--lf",
- "--last-failed",
- action="store_true",
- dest="lf",
- help="rerun only the tests that failed "
- "at the last run (or all if none failed)",
- )
- group.addoption(
- "--ff",
- "--failed-first",
- action="store_true",
- dest="failedfirst",
- help="run all tests, but run the last failures first.\n"
- "This may re-order tests and thus lead to "
- "repeated fixture setup/teardown.",
- )
- group.addoption(
- "--nf",
- "--new-first",
- action="store_true",
- dest="newfirst",
- help="run tests from new files first, then the rest of the tests "
- "sorted by file mtime",
- )
- group.addoption(
- "--cache-show",
- action="append",
- nargs="?",
- dest="cacheshow",
- help=(
- "show cache contents, don't perform collection or tests. "
- "Optional argument: glob (default: '*')."
- ),
- )
- group.addoption(
- "--cache-clear",
- action="store_true",
- dest="cacheclear",
- help="remove all cache contents at start of test run.",
- )
- cache_dir_default = ".pytest_cache"
- if "TOX_ENV_DIR" in os.environ:
- cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
- parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
- group.addoption(
- "--lfnf",
- "--last-failed-no-failures",
- action="store",
- dest="last_failed_no_failures",
- choices=("all", "none"),
- default="all",
- help="which tests to run with no previously (known) failures.",
- )
-
-
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- if config.option.cacheshow:
- from _pytest.main import wrap_session
-
- return wrap_session(config, cacheshow)
- return None
-
-
-@hookimpl(tryfirst=True)
-def pytest_configure(config: Config) -> None:
- config.cache = Cache.for_config(config, _ispytest=True)
- config.pluginmanager.register(LFPlugin(config), "lfplugin")
- config.pluginmanager.register(NFPlugin(config), "nfplugin")
-
-
-@fixture
-def cache(request: FixtureRequest) -> Cache:
- """Return a cache object that can persist state between testing sessions.
-
- cache.get(key, default)
- cache.set(key, value)
-
- Keys must be ``/`` separated strings, where the first part is usually the
- name of your plugin or application to avoid clashes with other cache users.
-
- Values can be any object handled by the json stdlib module.
- """
- assert request.config.cache is not None
- return request.config.cache
-
-
-def pytest_report_header(config: Config) -> Optional[str]:
- """Display cachedir with --cache-show and if non-default."""
- if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
- assert config.cache is not None
- cachedir = config.cache._cachedir
- # TODO: evaluate generating upward relative paths
- # starting with .., ../.. if sensible
-
- try:
- displaypath = cachedir.relative_to(config.rootpath)
- except ValueError:
- displaypath = cachedir
- return f"cachedir: {displaypath}"
- return None
-
-
-def cacheshow(config: Config, session: Session) -> int:
- from pprint import pformat
-
- assert config.cache is not None
-
- tw = TerminalWriter()
- tw.line("cachedir: " + str(config.cache._cachedir))
- if not config.cache._cachedir.is_dir():
- tw.line("cache is empty")
- return 0
-
- glob = config.option.cacheshow[0]
- if glob is None:
- glob = "*"
-
- dummy = object()
- basedir = config.cache._cachedir
- vdir = basedir / Cache._CACHE_PREFIX_VALUES
- tw.sep("-", "cache values for %r" % glob)
- for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
- key = str(valpath.relative_to(vdir))
- val = config.cache.get(key, dummy)
- if val is dummy:
- tw.line("%s contains unreadable content, will be ignored" % key)
- else:
- tw.line("%s contains:" % key)
- for line in pformat(val).splitlines():
- tw.line(" " + line)
-
- ddir = basedir / Cache._CACHE_PREFIX_DIRS
- if ddir.is_dir():
- contents = sorted(ddir.rglob(glob))
- tw.sep("-", "cache directories for %r" % glob)
- for p in contents:
- # if p.is_dir():
- # print("%s/" % p.relative_to(basedir))
- if p.is_file():
- key = str(p.relative_to(basedir))
- tw.line(f"{key} is a file of length {p.stat().st_size:d}")
- return 0
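A minimal usage sketch for the cache plugin deleted above: the `cache` fixture persists JSON-serializable values across runs under '/'-separated keys ("example/answer" below is a made-up key):

def test_remembers_previous_value(cache):
    # get() returns the default on the first run or if the cached file is unreadable.
    previous = cache.get("example/answer", None)  # hypothetical key
    cache.set("example/answer", 42)
    assert previous in (None, 42)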
diff --git a/contrib/python/pytest/py3/_pytest/capture.py b/contrib/python/pytest/py3/_pytest/capture.py
deleted file mode 100644
index ee9de37332..0000000000
--- a/contrib/python/pytest/py3/_pytest/capture.py
+++ /dev/null
@@ -1,942 +0,0 @@
-"""Per-test stdout/stderr capturing mechanism."""
-import contextlib
-import functools
-import io
-import os
-import sys
-from io import UnsupportedOperation
-from tempfile import TemporaryFile
-from typing import Any
-from typing import AnyStr
-from typing import Generator
-from typing import Generic
-from typing import Iterator
-from typing import Optional
-from typing import TextIO
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-from _pytest.compat import final
-from _pytest.config import Config
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import SubRequest
-from _pytest.nodes import Collector
-from _pytest.nodes import File
-from _pytest.nodes import Item
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
- _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"]
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group._addoption(
- "--capture",
- action="store",
- default="fd",
- metavar="method",
- choices=["fd", "sys", "no", "tee-sys"],
- help="per-test capturing method: one of fd|sys|no|tee-sys.",
- )
- group._addoption(
- "-s",
- action="store_const",
- const="no",
- dest="capture",
- help="shortcut for --capture=no.",
- )
-
-
-def _colorama_workaround() -> None:
- """Ensure colorama is imported so that it attaches to the correct stdio
- handles on Windows.
-
- colorama touches the terminal at import time, so if the first import
- of colorama happens while I/O capture is active, colorama will fail
- in various ways.
- """
- if sys.platform.startswith("win32"):
- try:
- import colorama # noqa: F401
- except ImportError:
- pass
-
-
-def _windowsconsoleio_workaround(stream: TextIO) -> None:
- """Workaround for Windows Unicode console handling.
-
- Python 3.6 implemented Unicode console handling for Windows. This works
- by reading/writing to the raw console handle using
- ``{Read,Write}ConsoleW``.
-
- The problem is that we are going to ``dup2`` over the stdio file
- descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
- handles used by Python to write to the console. Though there is still some
- weirdness and the console handle seems to only be closed randomly and not
- on the first call to ``CloseHandle``, or maybe it gets reopened with the
- same handle value when we suspend capturing.
-
- The workaround in this case will reopen stdio with a different fd which
- also means a different handle by replicating the logic in
- "Py_lifecycle.c:initstdio/create_stdio".
-
- :param stream:
- In practice ``sys.stdout`` or ``sys.stderr``, but given
- here as a parameter for unit-testing purposes.
-
- See https://github.com/pytest-dev/py/issues/103.
- """
- if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"):
- return
-
- # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666).
- if not hasattr(stream, "buffer"): # type: ignore[unreachable]
- return
-
- buffered = hasattr(stream.buffer, "raw")
- raw_stdout = stream.buffer.raw if buffered else stream.buffer # type: ignore[attr-defined]
-
- if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined]
- return
-
- def _reopen_stdio(f, mode):
- if not buffered and mode[0] == "w":
- buffering = 0
- else:
- buffering = -1
-
- return io.TextIOWrapper(
- open(os.dup(f.fileno()), mode, buffering),
- f.encoding,
- f.errors,
- f.newlines,
- f.line_buffering,
- )
-
- sys.stdin = _reopen_stdio(sys.stdin, "rb")
- sys.stdout = _reopen_stdio(sys.stdout, "wb")
- sys.stderr = _reopen_stdio(sys.stderr, "wb")
-
-
-@hookimpl(hookwrapper=True)
-def pytest_load_initial_conftests(early_config: Config):
- ns = early_config.known_args_namespace
- if ns.capture == "fd":
- _windowsconsoleio_workaround(sys.stdout)
- _colorama_workaround()
- pluginmanager = early_config.pluginmanager
- capman = CaptureManager(ns.capture)
- pluginmanager.register(capman, "capturemanager")
-
- # Make sure that capturemanager is properly reset at final shutdown.
- early_config.add_cleanup(capman.stop_global_capturing)
-
- # Finally trigger conftest loading but while capturing (issue #93).
- capman.start_global_capturing()
- outcome = yield
- capman.suspend_global_capture()
- if outcome.excinfo is not None:
- out, err = capman.read_global_capture()
- sys.stdout.write(out)
- sys.stderr.write(err)
-
-
-# IO Helpers.
-
-
-class EncodedFile(io.TextIOWrapper):
- __slots__ = ()
-
- @property
- def name(self) -> str:
- # Ensure that file.name is a string. Workaround for a Python bug
- # fixed in >=3.7.4: https://bugs.python.org/issue36015
- return repr(self.buffer)
-
- @property
- def mode(self) -> str:
- # TextIOWrapper doesn't expose a mode, but at least some of our
- # tests check it.
- return self.buffer.mode.replace("b", "")
-
-
-class CaptureIO(io.TextIOWrapper):
- def __init__(self) -> None:
- super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
-
- def getvalue(self) -> str:
- assert isinstance(self.buffer, io.BytesIO)
- return self.buffer.getvalue().decode("UTF-8")
-
-
-class TeeCaptureIO(CaptureIO):
- def __init__(self, other: TextIO) -> None:
- self._other = other
- super().__init__()
-
- def write(self, s: str) -> int:
- super().write(s)
- return self._other.write(s)
-
-
-class DontReadFromInput:
- encoding = None
-
- def read(self, *args):
- raise OSError(
- "pytest: reading from stdin while output is captured! Consider using `-s`."
- )
-
- readline = read
- readlines = read
- __next__ = read
-
- def __iter__(self):
- return self
-
- def fileno(self) -> int:
- raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
-
- def isatty(self) -> bool:
- return False
-
- def close(self) -> None:
- pass
-
- @property
- def buffer(self):
- return self
-
-
-# Capture classes.
-
-
-patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
-
-
-class NoCapture:
- EMPTY_BUFFER = None
- __init__ = start = done = suspend = resume = lambda *args: None
-
-
-class SysCaptureBinary:
-
- EMPTY_BUFFER = b""
-
- def __init__(self, fd: int, tmpfile=None, *, tee: bool = False) -> None:
- name = patchsysdict[fd]
- self._old = getattr(sys, name)
- self.name = name
- if tmpfile is None:
- if name == "stdin":
- tmpfile = DontReadFromInput()
- else:
- tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old)
- self.tmpfile = tmpfile
- self._state = "initialized"
-
- def repr(self, class_name: str) -> str:
- return "<{} {} _old={} _state={!r} tmpfile={!r}>".format(
- class_name,
- self.name,
- hasattr(self, "_old") and repr(self._old) or "<UNSET>",
- self._state,
- self.tmpfile,
- )
-
- def __repr__(self) -> str:
- return "<{} {} _old={} _state={!r} tmpfile={!r}>".format(
- self.__class__.__name__,
- self.name,
- hasattr(self, "_old") and repr(self._old) or "<UNSET>",
- self._state,
- self.tmpfile,
- )
-
- def _assert_state(self, op: str, states: Tuple[str, ...]) -> None:
- assert (
- self._state in states
- ), "cannot {} in state {!r}: expected one of {}".format(
- op, self._state, ", ".join(states)
- )
-
- def start(self) -> None:
- self._assert_state("start", ("initialized",))
- setattr(sys, self.name, self.tmpfile)
- self._state = "started"
-
- def snap(self):
- self._assert_state("snap", ("started", "suspended"))
- self.tmpfile.seek(0)
- res = self.tmpfile.buffer.read()
- self.tmpfile.seek(0)
- self.tmpfile.truncate()
- return res
-
- def done(self) -> None:
- self._assert_state("done", ("initialized", "started", "suspended", "done"))
- if self._state == "done":
- return
- setattr(sys, self.name, self._old)
- del self._old
- self.tmpfile.close()
- self._state = "done"
-
- def suspend(self) -> None:
- self._assert_state("suspend", ("started", "suspended"))
- setattr(sys, self.name, self._old)
- self._state = "suspended"
-
- def resume(self) -> None:
- self._assert_state("resume", ("started", "suspended"))
- if self._state == "started":
- return
- setattr(sys, self.name, self.tmpfile)
- self._state = "started"
-
- def writeorg(self, data) -> None:
- self._assert_state("writeorg", ("started", "suspended"))
- self._old.flush()
- self._old.buffer.write(data)
- self._old.buffer.flush()
-
-
-class SysCapture(SysCaptureBinary):
- EMPTY_BUFFER = "" # type: ignore[assignment]
-
- def snap(self):
- res = self.tmpfile.getvalue()
- self.tmpfile.seek(0)
- self.tmpfile.truncate()
- return res
-
- def writeorg(self, data):
- self._assert_state("writeorg", ("started", "suspended"))
- self._old.write(data)
- self._old.flush()
-
-
-class FDCaptureBinary:
- """Capture IO to/from a given OS-level file descriptor.
-
- snap() produces `bytes`.
- """
-
- EMPTY_BUFFER = b""
-
- def __init__(self, targetfd: int) -> None:
- self.targetfd = targetfd
-
- try:
- os.fstat(targetfd)
- except OSError:
- # FD capturing is conceptually simple -- create a temporary file,
- # redirect the FD to it, redirect back when done. But when the
- # target FD is invalid it throws a wrench into this lovely scheme.
- #
- # Tests themselves shouldn't care if the FD is valid, FD capturing
- # should work regardless of external circumstances. So falling back
- # to just sys capturing is not a good option.
- #
- # Further complications are the need to support suspend() and the
- # possibility of FD reuse (e.g. the tmpfile getting the very same
- # target FD). The following approach is robust, I believe.
- self.targetfd_invalid: Optional[int] = os.open(os.devnull, os.O_RDWR)
- os.dup2(self.targetfd_invalid, targetfd)
- else:
- self.targetfd_invalid = None
- self.targetfd_save = os.dup(targetfd)
-
- if targetfd == 0:
- self.tmpfile = open(os.devnull)
- self.syscapture = SysCapture(targetfd)
- else:
- self.tmpfile = EncodedFile(
- TemporaryFile(buffering=0),
- encoding="utf-8",
- errors="replace",
- newline="",
- write_through=True,
- )
- if targetfd in patchsysdict:
- self.syscapture = SysCapture(targetfd, self.tmpfile)
- else:
- self.syscapture = NoCapture()
-
- self._state = "initialized"
-
- def __repr__(self) -> str:
- return "<{} {} oldfd={} _state={!r} tmpfile={!r}>".format(
- self.__class__.__name__,
- self.targetfd,
- self.targetfd_save,
- self._state,
- self.tmpfile,
- )
-
- def _assert_state(self, op: str, states: Tuple[str, ...]) -> None:
- assert (
- self._state in states
- ), "cannot {} in state {!r}: expected one of {}".format(
- op, self._state, ", ".join(states)
- )
-
- def start(self) -> None:
- """Start capturing on targetfd using memorized tmpfile."""
- self._assert_state("start", ("initialized",))
- os.dup2(self.tmpfile.fileno(), self.targetfd)
- self.syscapture.start()
- self._state = "started"
-
- def snap(self):
- self._assert_state("snap", ("started", "suspended"))
- self.tmpfile.seek(0)
- res = self.tmpfile.buffer.read()
- self.tmpfile.seek(0)
- self.tmpfile.truncate()
- return res
-
- def done(self) -> None:
- """Stop capturing, restore streams, return original capture file,
- seeked to position zero."""
- self._assert_state("done", ("initialized", "started", "suspended", "done"))
- if self._state == "done":
- return
- os.dup2(self.targetfd_save, self.targetfd)
- os.close(self.targetfd_save)
- if self.targetfd_invalid is not None:
- if self.targetfd_invalid != self.targetfd:
- os.close(self.targetfd)
- os.close(self.targetfd_invalid)
- self.syscapture.done()
- self.tmpfile.close()
- self._state = "done"
-
- def suspend(self) -> None:
- self._assert_state("suspend", ("started", "suspended"))
- if self._state == "suspended":
- return
- self.syscapture.suspend()
- os.dup2(self.targetfd_save, self.targetfd)
- self._state = "suspended"
-
- def resume(self) -> None:
- self._assert_state("resume", ("started", "suspended"))
- if self._state == "started":
- return
- self.syscapture.resume()
- os.dup2(self.tmpfile.fileno(), self.targetfd)
- self._state = "started"
-
- def writeorg(self, data):
- """Write to original file descriptor."""
- self._assert_state("writeorg", ("started", "suspended"))
- os.write(self.targetfd_save, data)
-
-
-class FDCapture(FDCaptureBinary):
- """Capture IO to/from a given OS-level file descriptor.
-
- snap() produces text.
- """
-
- # Ignore type because it doesn't match the type in the superclass (bytes).
- EMPTY_BUFFER = "" # type: ignore
-
- def snap(self):
- self._assert_state("snap", ("started", "suspended"))
- self.tmpfile.seek(0)
- res = self.tmpfile.read()
- self.tmpfile.seek(0)
- self.tmpfile.truncate()
- return res
-
- def writeorg(self, data):
- """Write to original file descriptor."""
- super().writeorg(data.encode("utf-8")) # XXX use encoding of original stream
-
-
-# MultiCapture
-
-
-# This class was a namedtuple, but due to mypy limitation[0] it could not be
-# made generic, so was replaced by a regular class which tries to emulate the
-# pertinent parts of a namedtuple. If the mypy limitation is ever lifted, can
-# make it a namedtuple again.
-# [0]: https://github.com/python/mypy/issues/685
-@final
-@functools.total_ordering
-class CaptureResult(Generic[AnyStr]):
- """The result of :method:`CaptureFixture.readouterr`."""
-
- __slots__ = ("out", "err")
-
- def __init__(self, out: AnyStr, err: AnyStr) -> None:
- self.out: AnyStr = out
- self.err: AnyStr = err
-
- def __len__(self) -> int:
- return 2
-
- def __iter__(self) -> Iterator[AnyStr]:
- return iter((self.out, self.err))
-
- def __getitem__(self, item: int) -> AnyStr:
- return tuple(self)[item]
-
- def _replace(
- self, *, out: Optional[AnyStr] = None, err: Optional[AnyStr] = None
- ) -> "CaptureResult[AnyStr]":
- return CaptureResult(
- out=self.out if out is None else out, err=self.err if err is None else err
- )
-
- def count(self, value: AnyStr) -> int:
- return tuple(self).count(value)
-
- def index(self, value) -> int:
- return tuple(self).index(value)
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, (CaptureResult, tuple)):
- return NotImplemented
- return tuple(self) == tuple(other)
-
- def __hash__(self) -> int:
- return hash(tuple(self))
-
- def __lt__(self, other: object) -> bool:
- if not isinstance(other, (CaptureResult, tuple)):
- return NotImplemented
- return tuple(self) < tuple(other)
-
- def __repr__(self) -> str:
- return f"CaptureResult(out={self.out!r}, err={self.err!r})"
-
-
-class MultiCapture(Generic[AnyStr]):
- _state = None
- _in_suspended = False
-
- def __init__(self, in_, out, err) -> None:
- self.in_ = in_
- self.out = out
- self.err = err
-
- def __repr__(self) -> str:
- return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
- self.out,
- self.err,
- self.in_,
- self._state,
- self._in_suspended,
- )
-
- def start_capturing(self) -> None:
- self._state = "started"
- if self.in_:
- self.in_.start()
- if self.out:
- self.out.start()
- if self.err:
- self.err.start()
-
- def pop_outerr_to_orig(self) -> Tuple[AnyStr, AnyStr]:
- """Pop current snapshot out/err capture and flush to orig streams."""
- out, err = self.readouterr()
- if out:
- self.out.writeorg(out)
- if err:
- self.err.writeorg(err)
- return out, err
-
- def suspend_capturing(self, in_: bool = False) -> None:
- self._state = "suspended"
- if self.out:
- self.out.suspend()
- if self.err:
- self.err.suspend()
- if in_ and self.in_:
- self.in_.suspend()
- self._in_suspended = True
-
- def resume_capturing(self) -> None:
- self._state = "started"
- if self.out:
- self.out.resume()
- if self.err:
- self.err.resume()
- if self._in_suspended:
- self.in_.resume()
- self._in_suspended = False
-
- def stop_capturing(self) -> None:
- """Stop capturing and reset capturing streams."""
- if self._state == "stopped":
- raise ValueError("was already stopped")
- self._state = "stopped"
- if self.out:
- self.out.done()
- if self.err:
- self.err.done()
- if self.in_:
- self.in_.done()
-
- def is_started(self) -> bool:
- """Whether actively capturing -- not suspended or stopped."""
- return self._state == "started"
-
- def readouterr(self) -> CaptureResult[AnyStr]:
- out = self.out.snap() if self.out else ""
- err = self.err.snap() if self.err else ""
- return CaptureResult(out, err)
-
-
-def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]:
- if method == "fd":
- return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2))
- elif method == "sys":
- return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2))
- elif method == "no":
- return MultiCapture(in_=None, out=None, err=None)
- elif method == "tee-sys":
- return MultiCapture(
- in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
- )
- raise ValueError(f"unknown capturing method: {method!r}")
-
-
-# CaptureManager and CaptureFixture
-
-
-class CaptureManager:
- """The capture plugin.
-
- Manages that the appropriate capture method is enabled/disabled during
- collection and each test phase (setup, call, teardown). After each of
- those points, the captured output is obtained and attached to the
- collection/runtest report.
-
- There are two levels of capture:
-
- * global: enabled by default and can be suppressed by the ``-s``
- option. This is always enabled/disabled during collection and each test
- phase.
-
- * fixture: when a test function or one of its fixtures depends on the
- ``capsys`` or ``capfd`` fixtures. In this case special handling is
- needed to ensure the fixtures take precedence over the global capture.
- """
-
- def __init__(self, method: "_CaptureMethod") -> None:
- self._method = method
- self._global_capturing: Optional[MultiCapture[str]] = None
- self._capture_fixture: Optional[CaptureFixture[Any]] = None
-
- def __repr__(self) -> str:
- return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
- self._method, self._global_capturing, self._capture_fixture
- )
-
- def is_capturing(self) -> Union[str, bool]:
- if self.is_globally_capturing():
- return "global"
- if self._capture_fixture:
- return "fixture %s" % self._capture_fixture.request.fixturename
- return False
-
- # Global capturing control
-
- def is_globally_capturing(self) -> bool:
- return self._method != "no"
-
- def start_global_capturing(self) -> None:
- assert self._global_capturing is None
- self._global_capturing = _get_multicapture(self._method)
- self._global_capturing.start_capturing()
-
- def stop_global_capturing(self) -> None:
- if self._global_capturing is not None:
- self._global_capturing.pop_outerr_to_orig()
- self._global_capturing.stop_capturing()
- self._global_capturing = None
-
- def resume_global_capture(self) -> None:
- # During teardown of the python process, and on rare occasions, capture
- # attributes can be `None` while trying to resume global capture.
- if self._global_capturing is not None:
- self._global_capturing.resume_capturing()
-
- def suspend_global_capture(self, in_: bool = False) -> None:
- if self._global_capturing is not None:
- self._global_capturing.suspend_capturing(in_=in_)
-
- def suspend(self, in_: bool = False) -> None:
- # Need to undo local capsys-et-al if it exists before disabling global capture.
- self.suspend_fixture()
- self.suspend_global_capture(in_)
-
- def resume(self) -> None:
- self.resume_global_capture()
- self.resume_fixture()
-
- def read_global_capture(self) -> CaptureResult[str]:
- assert self._global_capturing is not None
- return self._global_capturing.readouterr()
-
- # Fixture Control
-
- def set_fixture(self, capture_fixture: "CaptureFixture[Any]") -> None:
- if self._capture_fixture:
- current_fixture = self._capture_fixture.request.fixturename
- requested_fixture = capture_fixture.request.fixturename
- capture_fixture.request.raiseerror(
- "cannot use {} and {} at the same time".format(
- requested_fixture, current_fixture
- )
- )
- self._capture_fixture = capture_fixture
-
- def unset_fixture(self) -> None:
- self._capture_fixture = None
-
- def activate_fixture(self) -> None:
- """If the current item is using ``capsys`` or ``capfd``, activate
- them so they take precedence over the global capture."""
- if self._capture_fixture:
- self._capture_fixture._start()
-
- def deactivate_fixture(self) -> None:
- """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any."""
- if self._capture_fixture:
- self._capture_fixture.close()
-
- def suspend_fixture(self) -> None:
- if self._capture_fixture:
- self._capture_fixture._suspend()
-
- def resume_fixture(self) -> None:
- if self._capture_fixture:
- self._capture_fixture._resume()
-
- # Helper context managers
-
- @contextlib.contextmanager
- def global_and_fixture_disabled(self) -> Generator[None, None, None]:
- """Context manager to temporarily disable global and current fixture capturing."""
- do_fixture = self._capture_fixture and self._capture_fixture._is_started()
- if do_fixture:
- self.suspend_fixture()
- do_global = self._global_capturing and self._global_capturing.is_started()
- if do_global:
- self.suspend_global_capture()
- try:
- yield
- finally:
- if do_global:
- self.resume_global_capture()
- if do_fixture:
- self.resume_fixture()
-
- @contextlib.contextmanager
- def item_capture(self, when: str, item: Item) -> Generator[None, None, None]:
- self.resume_global_capture()
- self.activate_fixture()
- try:
- yield
- finally:
- self.deactivate_fixture()
- self.suspend_global_capture(in_=False)
-
- out, err = self.read_global_capture()
- item.add_report_section(when, "stdout", out)
- item.add_report_section(when, "stderr", err)
-
- # Hooks
-
- @hookimpl(hookwrapper=True)
- def pytest_make_collect_report(self, collector: Collector):
- if isinstance(collector, File):
- self.resume_global_capture()
- outcome = yield
- self.suspend_global_capture()
- out, err = self.read_global_capture()
- rep = outcome.get_result()
- if out:
- rep.sections.append(("Captured stdout", out))
- if err:
- rep.sections.append(("Captured stderr", err))
- else:
- yield
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]:
- with self.item_capture("setup", item):
- yield
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]:
- with self.item_capture("call", item):
- yield
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]:
- with self.item_capture("teardown", item):
- yield
-
- @hookimpl(tryfirst=True)
- def pytest_keyboard_interrupt(self) -> None:
- self.stop_global_capturing()
-
- @hookimpl(tryfirst=True)
- def pytest_internalerror(self) -> None:
- self.stop_global_capturing()
-
-
-class CaptureFixture(Generic[AnyStr]):
- """Object returned by the :fixture:`capsys`, :fixture:`capsysbinary`,
- :fixture:`capfd` and :fixture:`capfdbinary` fixtures."""
-
- def __init__(
- self, captureclass, request: SubRequest, *, _ispytest: bool = False
- ) -> None:
- check_ispytest(_ispytest)
- self.captureclass = captureclass
- self.request = request
- self._capture: Optional[MultiCapture[AnyStr]] = None
- self._captured_out = self.captureclass.EMPTY_BUFFER
- self._captured_err = self.captureclass.EMPTY_BUFFER
-
- def _start(self) -> None:
- if self._capture is None:
- self._capture = MultiCapture(
- in_=None,
- out=self.captureclass(1),
- err=self.captureclass(2),
- )
- self._capture.start_capturing()
-
- def close(self) -> None:
- if self._capture is not None:
- out, err = self._capture.pop_outerr_to_orig()
- self._captured_out += out
- self._captured_err += err
- self._capture.stop_capturing()
- self._capture = None
-
- def readouterr(self) -> CaptureResult[AnyStr]:
- """Read and return the captured output so far, resetting the internal
- buffer.
-
- :returns:
- The captured content as a namedtuple with ``out`` and ``err``
- string attributes.
- """
- captured_out, captured_err = self._captured_out, self._captured_err
- if self._capture is not None:
- out, err = self._capture.readouterr()
- captured_out += out
- captured_err += err
- self._captured_out = self.captureclass.EMPTY_BUFFER
- self._captured_err = self.captureclass.EMPTY_BUFFER
- return CaptureResult(captured_out, captured_err)
-
- def _suspend(self) -> None:
- """Suspend this fixture's own capturing temporarily."""
- if self._capture is not None:
- self._capture.suspend_capturing()
-
- def _resume(self) -> None:
- """Resume this fixture's own capturing temporarily."""
- if self._capture is not None:
- self._capture.resume_capturing()
-
- def _is_started(self) -> bool:
- """Whether actively capturing -- not disabled or closed."""
- if self._capture is not None:
- return self._capture.is_started()
- return False
-
- @contextlib.contextmanager
- def disabled(self) -> Generator[None, None, None]:
- """Temporarily disable capturing while inside the ``with`` block."""
- capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
- with capmanager.global_and_fixture_disabled():
- yield
-
-
-# The fixtures.
-
-
-@fixture
-def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
- """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
-
- The captured output is made available via ``capsys.readouterr()`` method
- calls, which return a ``(out, err)`` namedtuple.
- ``out`` and ``err`` will be ``text`` objects.
- """
- capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True)
- capman.set_fixture(capture_fixture)
- capture_fixture._start()
- yield capture_fixture
- capture_fixture.close()
- capman.unset_fixture()
-
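-# -- Editor's illustrative sketch (not part of the original file) ------------
-# A minimal example of how a test consumes the ``capsys`` fixture defined
-# above; the test name and the printed text are hypothetical.
-def test_capsys_example(capsys) -> None:
-    print("hello")
-    captured = capsys.readouterr()  # CaptureResult(out="hello\n", err="")
-    assert captured.out == "hello\n"
-    with capsys.disabled():
-        print("not captured, goes to the real stdout")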
-
-@fixture
-def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
- """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
-
- The captured output is made available via ``capsysbinary.readouterr()``
- method calls, which return a ``(out, err)`` namedtuple.
- ``out`` and ``err`` will be ``bytes`` objects.
- """
- capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True)
- capman.set_fixture(capture_fixture)
- capture_fixture._start()
- yield capture_fixture
- capture_fixture.close()
- capman.unset_fixture()
-
-
-@fixture
-def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
- """Enable text capturing of writes to file descriptors ``1`` and ``2``.
-
- The captured output is made available via ``capfd.readouterr()`` method
- calls, which return a ``(out, err)`` namedtuple.
- ``out`` and ``err`` will be ``text`` objects.
- """
- capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True)
- capman.set_fixture(capture_fixture)
- capture_fixture._start()
- yield capture_fixture
- capture_fixture.close()
- capman.unset_fixture()
-
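-# -- Editor's illustrative sketch (not part of the original file) ------------
-# Unlike ``capsys``, ``capfd`` captures at the file-descriptor level, so
-# writes that bypass ``sys.stdout`` (for example from subprocesses) are seen
-# as well; the test below is hypothetical.
-def test_capfd_example(capfd) -> None:
-    import os
-    import subprocess
-    import sys
-
-    os.write(1, b"raw fd write\n")
-    subprocess.run([sys.executable, "-c", "print('from a subprocess')"])
-    out, err = capfd.readouterr()
-    assert "raw fd write" in out
-    assert "from a subprocess" in out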
-
-@fixture
-def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
- """Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
-
- The captured output is made available via ``capfdbinary.readouterr()`` method
- calls, which return a ``(out, err)`` namedtuple.
- ``out`` and ``err`` will be ``bytes`` objects.
- """
- capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True)
- capman.set_fixture(capture_fixture)
- capture_fixture._start()
- yield capture_fixture
- capture_fixture.close()
- capman.unset_fixture()
diff --git a/contrib/python/pytest/py3/_pytest/compat.py b/contrib/python/pytest/py3/_pytest/compat.py
deleted file mode 100644
index 71c2518d77..0000000000
--- a/contrib/python/pytest/py3/_pytest/compat.py
+++ /dev/null
@@ -1,405 +0,0 @@
-"""Python version compatibility code."""
-import enum
-import functools
-import inspect
-import os
-import sys
-from inspect import Parameter
-from inspect import signature
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import Generic
-from typing import Optional
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import attr
-import py
-
-if TYPE_CHECKING:
- from typing import NoReturn
- from typing_extensions import Final
-
-
-_T = TypeVar("_T")
-_S = TypeVar("_S")
-
-#: Constant used to prepare for pylib path replacements/lazy proxies later on.
-# Intended for removal in pytest 8.0 or 9.0.
-
-# fmt: off
-# intentional space to create a fake difference for the verification
-LEGACY_PATH = py.path. local
-# fmt: on
-
-
-def legacy_path(path: Union[str, "os.PathLike[str]"]) -> LEGACY_PATH:
- """Internal wrapper to prepare lazy proxies for legacy_path instances"""
- return LEGACY_PATH(path)
-
-
-# fmt: off
-# Singleton type for NOTSET, as described in:
-# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
-class NotSetType(enum.Enum):
- token = 0
-NOTSET: "Final" = NotSetType.token # noqa: E305
-# fmt: on
-
-if sys.version_info >= (3, 8):
- from importlib import metadata as importlib_metadata
-else:
- import importlib_metadata # noqa: F401
-
-
-def _format_args(func: Callable[..., Any]) -> str:
- return str(signature(func))
-
-
-def is_generator(func: object) -> bool:
- genfunc = inspect.isgeneratorfunction(func)
- return genfunc and not iscoroutinefunction(func)
-
-
-def iscoroutinefunction(func: object) -> bool:
- """Return True if func is a coroutine function (a function defined with async
- def syntax, and doesn't contain yield), or a function decorated with
- @asyncio.coroutine.
-
- Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
- importing asyncio directly, which in turn also initializes the "logging"
- module as a side-effect (see issue #8).
- """
- return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
-
-
-def is_async_function(func: object) -> bool:
- """Return True if the given function seems to be an async function or
- an async generator."""
- return iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
-
-
-def getlocation(function, curdir: Optional[str] = None) -> str:
- function = get_real_func(function)
- fn = Path(inspect.getfile(function))
- lineno = function.__code__.co_firstlineno
- if curdir is not None:
- try:
- relfn = fn.relative_to(curdir)
- except ValueError:
- pass
- else:
- return "%s:%d" % (relfn, lineno + 1)
- return "%s:%d" % (fn, lineno + 1)
-
-
-def num_mock_patch_args(function) -> int:
- """Return number of arguments used up by mock arguments (if any)."""
- patchings = getattr(function, "patchings", None)
- if not patchings:
- return 0
-
- mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
- ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
-
- return len(
- [
- p
- for p in patchings
- if not p.attribute_name
- and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
- ]
- )
-
-
-def getfuncargnames(
- function: Callable[..., Any],
- *,
- name: str = "",
- is_method: bool = False,
- cls: Optional[type] = None,
-) -> Tuple[str, ...]:
- """Return the names of a function's mandatory arguments.
-
- Should return the names of all function arguments that:
- * Aren't bound to an instance or type as in instance or class methods.
- * Don't have default values.
- * Aren't bound with functools.partial.
- * Aren't replaced with mocks.
-
- The is_method and cls arguments indicate that the function should
- be treated as a bound method even though it's not, unless (in the
- case of cls) the function turns out to be a static method.
-
- The name parameter should be the original name in which the function was collected.
- """
- # TODO(RonnyPfannschmidt): This function should be refactored when we
- # revisit fixtures. The fixture mechanism should ask the node for
- # the fixture names, and not try to obtain directly from the
- # function object well after collection has occurred.
-
- # The parameters attribute of a Signature object contains an
- # ordered mapping of parameter names to Parameter instances. This
- # creates a tuple of the names of the parameters that don't have
- # defaults.
- try:
- parameters = signature(function).parameters
- except (ValueError, TypeError) as e:
- from _pytest.outcomes import fail
-
- fail(
- f"Could not determine arguments of {function!r}: {e}",
- pytrace=False,
- )
-
- arg_names = tuple(
- p.name
- for p in parameters.values()
- if (
- p.kind is Parameter.POSITIONAL_OR_KEYWORD
- or p.kind is Parameter.KEYWORD_ONLY
- )
- and p.default is Parameter.empty
- )
- if not name:
- name = function.__name__
-
- # If this function should be treated as a bound method even though
- # it's passed as an unbound method or function, remove the first
- # parameter name.
- if is_method or (
- # Not using `getattr` because we don't want to resolve the staticmethod.
- # Not using `cls.__dict__` because we want to check the entire MRO.
- cls
- and not isinstance(
- inspect.getattr_static(cls, name, default=None), staticmethod
- )
- ):
- arg_names = arg_names[1:]
- # Remove any names that will be replaced with mocks.
- if hasattr(function, "__wrapped__"):
- arg_names = arg_names[num_mock_patch_args(function) :]
- return arg_names
-
-
-def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
- # Note: this code intentionally mirrors the code at the beginning of
- # getfuncargnames, to get the arguments which were excluded from its result
- # because they had default values.
- return tuple(
- p.name
- for p in signature(function).parameters.values()
- if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
- and p.default is not Parameter.empty
- )
-
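-# -- Editor's illustrative sketch (not part of the original file) ------------
-# For a plain function, only parameters without defaults are reported by
-# getfuncargnames(); the ones with defaults come from get_default_arg_names().
-# ``_example_test`` is a hypothetical function.
-def _example_test(fixture_a, fixture_b, flag=False):
-    pass
-
-assert getfuncargnames(_example_test) == ("fixture_a", "fixture_b")
-assert get_default_arg_names(_example_test) == ("flag",)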
-
-_non_printable_ascii_translate_table = {
- i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
-}
-_non_printable_ascii_translate_table.update(
- {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
-)
-
-
-def _translate_non_printable(s: str) -> str:
- return s.translate(_non_printable_ascii_translate_table)
-
-
-STRING_TYPES = bytes, str
-
-
-def _bytes_to_ascii(val: bytes) -> str:
- return val.decode("ascii", "backslashreplace")
-
-
-def ascii_escaped(val: Union[bytes, str]) -> str:
- r"""If val is pure ASCII, return it as a str; otherwise, escape
- bytes objects into a sequence of escaped bytes:
-
- b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
-
- and escapes unicode objects into a sequence of escaped unicode
- ids, e.g.:
-
- r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
-
- Note:
- The obvious "v.decode('unicode-escape')" will return
- valid UTF-8 unicode if it finds them in bytes, but we
- want to return escaped bytes for any byte, even if they match
- a UTF-8 string.
- """
- if isinstance(val, bytes):
- ret = _bytes_to_ascii(val)
- else:
- ret = val
- return ret
-
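-# -- Editor's illustrative sketch (not part of the original file) ------------
-# The byte values below are hypothetical; bytes are backslash-escaped while
-# pure-ASCII strings are returned unchanged.
-assert ascii_escaped(b"\xc3\xb4") == "\\xc3\\xb4"
-assert ascii_escaped("ascii only") == "ascii only"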
-
-@attr.s
-class _PytestWrapper:
- """Dummy wrapper around a function object for internal use only.
-
- Used to correctly unwrap the underlying function object when we are
- creating fixtures, because we wrap the function object ourselves with a
- decorator to issue warnings when the fixture function is called directly.
- """
-
- obj = attr.ib()
-
-
-def get_real_func(obj):
- """Get the real function object of the (possibly) wrapped object by
- functools.wraps or functools.partial."""
- start_obj = obj
- for i in range(100):
- # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
- # to trigger a warning if it gets called directly instead of by pytest: we don't
- # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
- new_obj = getattr(obj, "__pytest_wrapped__", None)
- if isinstance(new_obj, _PytestWrapper):
- obj = new_obj.obj
- break
- new_obj = getattr(obj, "__wrapped__", None)
- if new_obj is None:
- break
- obj = new_obj
- else:
- from _pytest._io.saferepr import saferepr
-
- raise ValueError(
- ("could not find real function of {start}\nstopped at {current}").format(
- start=saferepr(start_obj), current=saferepr(obj)
- )
- )
- if isinstance(obj, functools.partial):
- obj = obj.func
- return obj
-
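-# -- Editor's illustrative sketch (not part of the original file) ------------
-# A callable wrapped with functools.wraps exposes the original through
-# ``__wrapped__`` and is unwrapped again here; the names are hypothetical.
-def _original():
-    pass
-
-@functools.wraps(_original)
-def _wrapper(*args, **kwargs):
-    return _original(*args, **kwargs)
-
-assert get_real_func(_wrapper) is _original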
-
-def get_real_method(obj, holder):
- """Attempt to obtain the real function object that might be wrapping
- ``obj``, while at the same time returning a bound method to ``holder`` if
- the original object was a bound method."""
- try:
- is_method = hasattr(obj, "__func__")
- obj = get_real_func(obj)
- except Exception: # pragma: no cover
- return obj
- if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
- obj = obj.__get__(holder)
- return obj
-
-
-def getimfunc(func):
- try:
- return func.__func__
- except AttributeError:
- return func
-
-
-def safe_getattr(object: Any, name: str, default: Any) -> Any:
- """Like getattr but return default upon any Exception or any OutcomeException.
-
- Attribute access can potentially fail for 'evil' Python objects.
- See issue #214.
- It catches OutcomeException because of #2490 (issue #580), new outcomes
- are derived from BaseException instead of Exception (for more details
- check #2707).
- """
- from _pytest.outcomes import TEST_OUTCOME
-
- try:
- return getattr(object, name, default)
- except TEST_OUTCOME:
- return default
-
-
-def safe_isclass(obj: object) -> bool:
- """Ignore any exception via isinstance on Python 3."""
- try:
- return inspect.isclass(obj)
- except Exception:
- return False
-
-
-if TYPE_CHECKING:
- if sys.version_info >= (3, 8):
- from typing import final as final
- else:
- from typing_extensions import final as final
-elif sys.version_info >= (3, 8):
- from typing import final as final
-else:
-
- def final(f):
- return f
-
-
-if sys.version_info >= (3, 8):
- from functools import cached_property as cached_property
-else:
- from typing import overload
- from typing import Type
-
- class cached_property(Generic[_S, _T]):
- __slots__ = ("func", "__doc__")
-
- def __init__(self, func: Callable[[_S], _T]) -> None:
- self.func = func
- self.__doc__ = func.__doc__
-
- @overload
- def __get__(
- self, instance: None, owner: Optional[Type[_S]] = ...
- ) -> "cached_property[_S, _T]":
- ...
-
- @overload
- def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T:
- ...
-
- def __get__(self, instance, owner=None):
- if instance is None:
- return self
- value = instance.__dict__[self.func.__name__] = self.func(instance)
- return value
-
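-# -- Editor's illustrative sketch (not part of the original file) ------------
-# Both the stdlib version and the backport above compute the property once
-# per instance and cache the result in ``__dict__``; ``_Squares`` is a
-# hypothetical class.
-class _Squares:
-    def __init__(self, n: int) -> None:
-        self.n = n
-
-    @cached_property
-    def value(self) -> int:
-        return self.n * self.n
-
-_sq = _Squares(4)
-assert _sq.value == 16
-assert "value" in _sq.__dict__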
-
-# Perform exhaustiveness checking.
-#
-# Consider this example:
-#
-# MyUnion = Union[int, str]
-#
-# def handle(x: MyUnion) -> int:
-# if isinstance(x, int):
-# return 1
-# elif isinstance(x, str):
-# return 2
-# else:
-# raise Exception('unreachable')
-#
-# Now suppose we add a new variant:
-#
-# MyUnion = Union[int, str, bytes]
-#
-# After doing this, we must remember to go and update the handle
-# function to handle the new variant.
-#
-# With `assert_never` we can do better:
-#
-# # instead of: raise Exception('unreachable')
-# return assert_never(x)
-#
-# Now, if we forget to handle the new variant, the type-checker will emit a
-# compile-time error, instead of the runtime error we would have gotten
-# previously.
-#
-# This also works for Enums (if you use `is` to compare) and Literals.
-def assert_never(value: "NoReturn") -> "NoReturn":
- assert False, f"Unhandled value: {value} ({type(value).__name__})"
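-# -- Editor's illustrative sketch (not part of the original file) ------------
-# Exhaustiveness checking as described in the comment block above; the
-# ``_Color`` enum and ``_describe`` function are hypothetical.
-class _Color(enum.Enum):
-    RED = 1
-    BLUE = 2
-
-def _describe(color: "_Color") -> str:
-    if color is _Color.RED:
-        return "warm"
-    elif color is _Color.BLUE:
-        return "cool"
-    else:
-        assert_never(color)  # a type checker flags any unhandled member here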
diff --git a/contrib/python/pytest/py3/_pytest/config/__init__.py b/contrib/python/pytest/py3/_pytest/config/__init__.py
deleted file mode 100644
index 91ad3f094f..0000000000
--- a/contrib/python/pytest/py3/_pytest/config/__init__.py
+++ /dev/null
@@ -1,1693 +0,0 @@
-"""Command line options, ini-file and conftest.py processing."""
-import argparse
-import collections.abc
-import copy
-import enum
-import inspect
-import os
-import re
-import shlex
-import sys
-import types
-import warnings
-from functools import lru_cache
-from pathlib import Path
-from textwrap import dedent
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Dict
-from typing import Generator
-from typing import IO
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import Sequence
-from typing import Set
-from typing import TextIO
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-from pluggy import HookimplMarker
-from pluggy import HookspecMarker
-from pluggy import PluginManager
-
-import _pytest._code
-import _pytest.deprecated
-import _pytest.hookspec
-from .exceptions import PrintHelp as PrintHelp
-from .exceptions import UsageError as UsageError
-from .findpaths import determine_setup
-from _pytest._code import ExceptionInfo
-from _pytest._code import filter_traceback
-from _pytest._io import TerminalWriter
-from _pytest.compat import final
-from _pytest.compat import importlib_metadata
-from _pytest.outcomes import fail
-from _pytest.outcomes import Skipped
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import import_path
-from _pytest.pathlib import ImportMode
-from _pytest.pathlib import resolve_package_path
-from _pytest.stash import Stash
-from _pytest.warning_types import PytestConfigWarning
-
-if TYPE_CHECKING:
-
- from _pytest._code.code import _TracebackStyle
- from _pytest.terminal import TerminalReporter
- from .argparsing import Argument
-
-
-_PluggyPlugin = object
-"""A type to represent plugin objects.
-
-Plugins can be any namespace, so we can't narrow it down much, but we use an
-alias to make the intent clear.
-
-Ideally this type would be provided by pluggy itself.
-"""
-
-
-hookimpl = HookimplMarker("pytest")
-hookspec = HookspecMarker("pytest")
-
-
-@final
-class ExitCode(enum.IntEnum):
- """Encodes the valid exit codes by pytest.
-
- Currently users and plugins may supply other exit codes as well.
-
- .. versionadded:: 5.0
- """
-
- #: Tests passed.
- OK = 0
- #: Tests failed.
- TESTS_FAILED = 1
- #: pytest was interrupted.
- INTERRUPTED = 2
- #: An internal error got in the way.
- INTERNAL_ERROR = 3
- #: pytest was misused.
- USAGE_ERROR = 4
- #: pytest couldn't find tests.
- NO_TESTS_COLLECTED = 5
-
-
-class ConftestImportFailure(Exception):
- def __init__(
- self,
- path: Path,
- excinfo: Tuple[Type[Exception], Exception, TracebackType],
- ) -> None:
- super().__init__(path, excinfo)
- self.path = path
- self.excinfo = excinfo
-
- def __str__(self) -> str:
- return "{}: {} (from {})".format(
- self.excinfo[0].__name__, self.excinfo[1], self.path
- )
-
-
-def filter_traceback_for_conftest_import_failure(
- entry: _pytest._code.TracebackEntry,
-) -> bool:
- """Filter tracebacks entries which point to pytest internals or importlib.
-
- Make a special case for importlib because we use it to import test modules and conftest files
- in _pytest.pathlib.import_path.
- """
- return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep)
-
-
-def main(
- args: Optional[Union[List[str], "os.PathLike[str]"]] = None,
- plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
-) -> Union[int, ExitCode]:
- """Perform an in-process test run.
-
- :param args: List of command line arguments.
- :param plugins: List of plugin objects to be auto-registered during initialization.
-
- :returns: An exit code.
- """
- try:
- try:
- config = _prepareconfig(args, plugins)
- except ConftestImportFailure as e:
- exc_info = ExceptionInfo.from_exc_info(e.excinfo)
- tw = TerminalWriter(sys.stderr)
- tw.line(f"ImportError while loading conftest '{e.path}'.", red=True)
- exc_info.traceback = exc_info.traceback.filter(
- filter_traceback_for_conftest_import_failure
- )
- exc_repr = (
- exc_info.getrepr(style="short", chain=False)
- if exc_info.traceback
- else exc_info.exconly()
- )
- formatted_tb = str(exc_repr)
- for line in formatted_tb.splitlines():
- tw.line(line.rstrip(), red=True)
- return ExitCode.USAGE_ERROR
- else:
- try:
- ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
- config=config
- )
- try:
- return ExitCode(ret)
- except ValueError:
- return ret
- finally:
- config._ensure_unconfigure()
- except UsageError as e:
- tw = TerminalWriter(sys.stderr)
- for msg in e.args:
- tw.line(f"ERROR: {msg}\n", red=True)
- return ExitCode.USAGE_ERROR
-
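-# -- Editor's illustrative sketch (not part of the original file) ------------
-# ``main()`` is what ``pytest.main`` resolves to; a hypothetical in-process
-# invocation checks the returned exit code against ``ExitCode``. The function
-# below is illustrative only and is never called here.
-def _example_in_process_run() -> None:
-    ret = main(["tests/", "-q"])  # "tests/" is a hypothetical path
-    if ret == ExitCode.NO_TESTS_COLLECTED:
-        print("no tests were collected")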
-
-def console_main() -> int:
- """The CLI entry point of pytest.
-
- This function is not meant for programmable use; use `main()` instead.
- """
- # https://docs.python.org/3/library/signal.html#note-on-sigpipe
- try:
- code = main()
- sys.stdout.flush()
- return code
- except BrokenPipeError:
- # Python flushes standard streams on exit; redirect remaining output
- # to devnull to avoid another BrokenPipeError at shutdown
- devnull = os.open(os.devnull, os.O_WRONLY)
- os.dup2(devnull, sys.stdout.fileno())
- return 1 # Python exits with error code 1 on EPIPE
-
-
-class cmdline: # compatibility namespace
- main = staticmethod(main)
-
-
-def filename_arg(path: str, optname: str) -> str:
- """Argparse type validator for filename arguments.
-
- :path: Path of filename.
- :optname: Name of the option.
- """
- if os.path.isdir(path):
- raise UsageError(f"{optname} must be a filename, given: {path}")
- return path
-
-
-def directory_arg(path: str, optname: str) -> str:
- """Argparse type validator for directory arguments.
-
- :path: Path of directory.
- :optname: Name of the option.
- """
- if not os.path.isdir(path):
- raise UsageError(f"{optname} must be a directory, given: {path}")
- return path
-
-
-# Plugins that cannot be disabled via "-p no:X" currently.
-essential_plugins = (
- "mark",
- "main",
- "runner",
- "fixtures",
- "helpconfig", # Provides -p.
-)
-
-default_plugins = essential_plugins + (
- "python",
- "terminal",
- "debugging",
- "unittest",
- "capture",
- "skipping",
- "legacypath",
- "tmpdir",
- "monkeypatch",
- "recwarn",
- "pastebin",
- "nose",
- "assertion",
- "junitxml",
- "doctest",
- "cacheprovider",
- "freeze_support",
- "setuponly",
- "setupplan",
- "stepwise",
- "warnings",
- "logging",
- "reports",
- "python_path",
- *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []),
- "faulthandler",
-)
-
-builtin_plugins = set(default_plugins)
-builtin_plugins.add("pytester")
-builtin_plugins.add("pytester_assertions")
-
-
-def get_config(
- args: Optional[List[str]] = None,
- plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
-) -> "Config":
- # subsequent calls to main will create a fresh instance
- pluginmanager = PytestPluginManager()
- config = Config(
- pluginmanager,
- invocation_params=Config.InvocationParams(
- args=args or (),
- plugins=plugins,
- dir=Path.cwd(),
- ),
- )
-
- if args is not None:
- # Handle any "-p no:plugin" args.
- pluginmanager.consider_preparse(args, exclude_only=True)
-
- for spec in default_plugins:
- pluginmanager.import_plugin(spec)
-
- return config
-
-
-def get_plugin_manager() -> "PytestPluginManager":
- """Obtain a new instance of the
- :py:class:`pytest.PytestPluginManager`, with default plugins
- already loaded.
-
- This function can be used by integrations with other tools, for example
- to hook into pytest and run tests from an IDE.
- """
- return get_config().pluginmanager
-
-
-def _prepareconfig(
- args: Optional[Union[List[str], "os.PathLike[str]"]] = None,
- plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
-) -> "Config":
- if args is None:
- args = sys.argv[1:]
- elif isinstance(args, os.PathLike):
- args = [os.fspath(args)]
- elif not isinstance(args, list):
- msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})"
- raise TypeError(msg.format(args, type(args)))
-
- config = get_config(args, plugins)
- pluginmanager = config.pluginmanager
- try:
- if plugins:
- for plugin in plugins:
- if isinstance(plugin, str):
- pluginmanager.consider_pluginarg(plugin)
- else:
- pluginmanager.register(plugin)
- config = pluginmanager.hook.pytest_cmdline_parse(
- pluginmanager=pluginmanager, args=args
- )
- return config
- except BaseException:
- config._ensure_unconfigure()
- raise
-
-
-def _get_directory(path: Path) -> Path:
- """Get the directory of a path - itself if already a directory."""
- if path.is_file():
- return path.parent
- else:
- return path
-
-
-@final
-class PytestPluginManager(PluginManager):
- """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with
- additional pytest-specific functionality:
-
- * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
- ``pytest_plugins`` global variables found in plugins being loaded.
- * ``conftest.py`` loading during start-up.
- """
-
- def __init__(self) -> None:
- import _pytest.assertion
-
- super().__init__("pytest")
-
- # -- State related to local conftest plugins.
- # All loaded conftest modules.
- self._conftest_plugins: Set[types.ModuleType] = set()
- # All conftest modules applicable for a directory.
- # This includes the directory's own conftest modules as well
- # as those of its parent directories.
- self._dirpath2confmods: Dict[Path, List[types.ModuleType]] = {}
- # Cutoff directory above which conftests are no longer discovered.
- self._confcutdir: Optional[Path] = None
- # If set, conftest loading is skipped.
- self._noconftest = False
-
- # _getconftestmodules()'s call to _get_directory() causes a stat
- # storm when it's called potentially thousands of times in a test
- # session (#9478), often with the same path, so cache it.
- self._get_directory = lru_cache(256)(_get_directory)
-
- self._duplicatepaths: Set[Path] = set()
-
- # Plugins that were explicitly skipped with pytest.skip,
- # as a list of (module name, skip reason) tuples.
- # Previously we would issue a warning when a plugin was skipped, but
- # since warnings were refactored as first-class citizens of Config,
- # they are just stored here to be used later.
- self.skipped_plugins: List[Tuple[str, str]] = []
-
- self.add_hookspecs(_pytest.hookspec)
- self.register(self)
- if os.environ.get("PYTEST_DEBUG"):
- err: IO[str] = sys.stderr
- encoding: str = getattr(err, "encoding", "utf8")
- try:
- err = open(
- os.dup(err.fileno()),
- mode=err.mode,
- buffering=1,
- encoding=encoding,
- )
- except Exception:
- pass
- self.trace.root.setwriter(err.write)
- self.enable_tracing()
-
- # Config._consider_importhook will set a real object if required.
- self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
- # Used to know when we are importing conftests after the pytest_configure stage.
- self._configured = False
-
- def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str):
- # pytest hooks are always prefixed with "pytest_",
- # so we avoid accessing possibly non-readable attributes
- # (see issue #1073).
- if not name.startswith("pytest_"):
- return
- # Ignore names which can not be hooks.
- if name == "pytest_plugins":
- return
-
- method = getattr(plugin, name)
- opts = super().parse_hookimpl_opts(plugin, name)
-
- # Consider only actual functions for hooks (#3775).
- if not inspect.isroutine(method):
- return
-
- # Collect unmarked hooks as long as they have the ``pytest_`` prefix.
- if opts is None and name.startswith("pytest_"):
- opts = {}
- if opts is not None:
- # TODO: DeprecationWarning, people should use hookimpl
- # https://github.com/pytest-dev/pytest/issues/4562
- known_marks = {m.name for m in getattr(method, "pytestmark", [])}
-
- for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
- opts.setdefault(name, hasattr(method, name) or name in known_marks)
- return opts
-
- def parse_hookspec_opts(self, module_or_class, name: str):
- opts = super().parse_hookspec_opts(module_or_class, name)
- if opts is None:
- method = getattr(module_or_class, name)
-
- if name.startswith("pytest_"):
- # todo: deprecate hookspec hacks
- # https://github.com/pytest-dev/pytest/issues/4562
- known_marks = {m.name for m in getattr(method, "pytestmark", [])}
- opts = {
- "firstresult": hasattr(method, "firstresult")
- or "firstresult" in known_marks,
- "historic": hasattr(method, "historic")
- or "historic" in known_marks,
- }
- return opts
-
- def register(
- self, plugin: _PluggyPlugin, name: Optional[str] = None
- ) -> Optional[str]:
- if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS:
- warnings.warn(
- PytestConfigWarning(
- "{} plugin has been merged into the core, "
- "please remove it from your requirements.".format(
- name.replace("_", "-")
- )
- )
- )
- return None
- ret: Optional[str] = super().register(plugin, name)
- if ret:
- self.hook.pytest_plugin_registered.call_historic(
- kwargs=dict(plugin=plugin, manager=self)
- )
-
- if isinstance(plugin, types.ModuleType):
- self.consider_module(plugin)
- return ret
-
- def getplugin(self, name: str):
- # Support deprecated naming because plugins (xdist e.g.) use it.
- plugin: Optional[_PluggyPlugin] = self.get_plugin(name)
- return plugin
-
- def hasplugin(self, name: str) -> bool:
- """Return whether a plugin with the given name is registered."""
- return bool(self.get_plugin(name))
-
- def pytest_configure(self, config: "Config") -> None:
- """:meta private:"""
- # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
- # we should remove tryfirst/trylast as markers.
- config.addinivalue_line(
- "markers",
- "tryfirst: mark a hook implementation function such that the "
- "plugin machinery will try to call it first/as early as possible.",
- )
- config.addinivalue_line(
- "markers",
- "trylast: mark a hook implementation function such that the "
- "plugin machinery will try to call it last/as late as possible.",
- )
- self._configured = True
-
- #
- # Internal API for local conftest plugin handling.
- #
- def _set_initial_conftests(
- self, namespace: argparse.Namespace, rootpath: Path
- ) -> None:
- """Load initial conftest files given a preparsed "namespace".
-
- As conftest files may add their own command line options which have
- arguments ('--my-opt somepath') we might get some false positives.
- All builtin and 3rd party plugins will have been loaded, however, so
- common options will not confuse our logic here.
- """
- current = Path.cwd()
- self._confcutdir = (
- absolutepath(current / namespace.confcutdir)
- if namespace.confcutdir
- else None
- )
- self._noconftest = namespace.noconftest
- self._using_pyargs = namespace.pyargs
- testpaths = namespace.file_or_dir
- foundanchor = False
- for testpath in testpaths:
- path = str(testpath)
- # remove node-id syntax
- i = path.find("::")
- if i != -1:
- path = path[:i]
- anchor = absolutepath(current / path)
- if anchor.exists(): # we found some file object
- self._try_load_conftest(anchor, namespace.importmode, rootpath)
- foundanchor = True
- if not foundanchor:
- self._try_load_conftest(current, namespace.importmode, rootpath)
-
- def _is_in_confcutdir(self, path: Path) -> bool:
- """Whether a path is within the confcutdir.
-
- When false, should not load conftest.
- """
- if self._confcutdir is None:
- return True
- return path not in self._confcutdir.parents
-
- def _try_load_conftest(
- self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path
- ) -> None:
- self._getconftestmodules(anchor, importmode, rootpath)
- # let's also consider test* subdirs
- if anchor.is_dir():
- for x in anchor.glob("test*"):
- if x.is_dir():
- self._getconftestmodules(x, importmode, rootpath)
-
- def _getconftestmodules(
- self, path: Path, importmode: Union[str, ImportMode], rootpath: Path
- ) -> Sequence[types.ModuleType]:
- if self._noconftest:
- return []
-
- directory = self._get_directory(path)
-
- # Optimization: avoid repeated searches in the same directory.
- # Assumes always called with same importmode and rootpath.
- existing_clist = self._dirpath2confmods.get(directory)
- if existing_clist is not None:
- return existing_clist
-
- # XXX these days we may rather want to use config.rootpath
- # and allow users to opt into looking into the rootdir parent
- # directories instead of requiring to specify confcutdir.
- clist = []
- for parent in reversed((directory, *directory.parents)):
- if self._is_in_confcutdir(parent):
- conftestpath = parent / "conftest.py"
- if conftestpath.is_file():
- mod = self._importconftest(conftestpath, importmode, rootpath)
- clist.append(mod)
- self._dirpath2confmods[directory] = clist
- return clist
-
- def _rget_with_confmod(
- self,
- name: str,
- path: Path,
- importmode: Union[str, ImportMode],
- rootpath: Path,
- ) -> Tuple[types.ModuleType, Any]:
- modules = self._getconftestmodules(path, importmode, rootpath=rootpath)
- for mod in reversed(modules):
- try:
- return mod, getattr(mod, name)
- except AttributeError:
- continue
- raise KeyError(name)
-
- def _importconftest(
- self, conftestpath: Path, importmode: Union[str, ImportMode], rootpath: Path
- ) -> types.ModuleType:
- existing = self.get_plugin(str(conftestpath))
- if existing is not None:
- return cast(types.ModuleType, existing)
-
- pkgpath = resolve_package_path(conftestpath)
- if pkgpath is None:
- _ensure_removed_sysmodule(conftestpath.stem)
-
- try:
- mod = import_path(conftestpath, mode=importmode, root=rootpath)
- except Exception as e:
- assert e.__traceback__ is not None
- exc_info = (type(e), e, e.__traceback__)
- raise ConftestImportFailure(conftestpath, exc_info) from e
-
- self._check_non_top_pytest_plugins(mod, conftestpath)
-
- self._conftest_plugins.add(mod)
- dirpath = conftestpath.parent
- if dirpath in self._dirpath2confmods:
- for path, mods in self._dirpath2confmods.items():
- if dirpath in path.parents or path == dirpath:
- assert mod not in mods
- mods.append(mod)
- self.trace(f"loading conftestmodule {mod!r}")
- self.consider_conftest(mod)
- return mod
-
- def _check_non_top_pytest_plugins(
- self,
- mod: types.ModuleType,
- conftestpath: Path,
- ) -> None:
- if (
- hasattr(mod, "pytest_plugins")
- and self._configured
- and not self._using_pyargs
- ):
- msg = (
- "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n"
- "It affects the entire test suite instead of just below the conftest as expected.\n"
- " {}\n"
- "Please move it to a top level conftest file at the rootdir:\n"
- " {}\n"
- "For more information, visit:\n"
- " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
- )
- fail(msg.format(conftestpath, self._confcutdir), pytrace=False)
-
- #
- # API for bootstrapping plugin loading
- #
- #
-
- def consider_preparse(
- self, args: Sequence[str], *, exclude_only: bool = False
- ) -> None:
- """:meta private:"""
- i = 0
- n = len(args)
- while i < n:
- opt = args[i]
- i += 1
- if isinstance(opt, str):
- if opt == "-p":
- try:
- parg = args[i]
- except IndexError:
- return
- i += 1
- elif opt.startswith("-p"):
- parg = opt[2:]
- else:
- continue
- if exclude_only and not parg.startswith("no:"):
- continue
- self.consider_pluginarg(parg)
-
- def consider_pluginarg(self, arg: str) -> None:
- """:meta private:"""
- if arg.startswith("no:"):
- name = arg[3:]
- if name in essential_plugins:
- raise UsageError("plugin %s cannot be disabled" % name)
-
- # PR #4304: remove stepwise if cacheprovider is blocked.
- if name == "cacheprovider":
- self.set_blocked("stepwise")
- self.set_blocked("pytest_stepwise")
-
- self.set_blocked(name)
- if not name.startswith("pytest_"):
- self.set_blocked("pytest_" + name)
- else:
- name = arg
- # Unblock the plugin. None indicates that it has been blocked.
- # There is no interface with pluggy for this.
- if self._name2plugin.get(name, -1) is None:
- del self._name2plugin[name]
- if not name.startswith("pytest_"):
- if self._name2plugin.get("pytest_" + name, -1) is None:
- del self._name2plugin["pytest_" + name]
- self.import_plugin(arg, consider_entry_points=True)
-
- def consider_conftest(self, conftestmodule: types.ModuleType) -> None:
- """:meta private:"""
- self.register(conftestmodule, name=conftestmodule.__file__)
-
- def consider_env(self) -> None:
- """:meta private:"""
- self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
-
- def consider_module(self, mod: types.ModuleType) -> None:
- """:meta private:"""
- self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
-
- def _import_plugin_specs(
- self, spec: Union[None, types.ModuleType, str, Sequence[str]]
- ) -> None:
- plugins = _get_plugin_specs_as_list(spec)
- for import_spec in plugins:
- self.import_plugin(import_spec)
-
- def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None:
- """Import a plugin with ``modname``.
-
- If ``consider_entry_points`` is True, entry point names are also
- considered to find a plugin.
- """
- # Most often modname refers to builtin modules, e.g. "pytester",
- # "terminal" or "capture". Those plugins are registered under their
- # basename for historic purposes but must be imported with the
- # _pytest prefix.
- assert isinstance(modname, str), (
- "module name as text required, got %r" % modname
- )
- if self.is_blocked(modname) or self.get_plugin(modname) is not None:
- return
-
- importspec = "_pytest." + modname if modname in builtin_plugins else modname
- self.rewrite_hook.mark_rewrite(importspec)
-
- if consider_entry_points:
- loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
- if loaded:
- return
-
- try:
- __import__(importspec)
- except ImportError as e:
- raise ImportError(
- f'Error importing plugin "{modname}": {e.args[0]}'
- ).with_traceback(e.__traceback__) from e
-
- except Skipped as e:
- self.skipped_plugins.append((modname, e.msg or ""))
- else:
- mod = sys.modules[importspec]
- self.register(mod, modname)
-
-
-def _get_plugin_specs_as_list(
- specs: Union[None, types.ModuleType, str, Sequence[str]]
-) -> List[str]:
- """Parse a plugins specification into a list of plugin names."""
- # None means empty.
- if specs is None:
- return []
- # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
- if isinstance(specs, types.ModuleType):
- return []
- # Comma-separated list.
- if isinstance(specs, str):
- return specs.split(",") if specs else []
- # Direct specification.
- if isinstance(specs, collections.abc.Sequence):
- return list(specs)
- raise UsageError(
- "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r"
- % specs
- )
-
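-# -- Editor's illustrative sketch (not part of the original file) ------------
-# The accepted ``pytest_plugins`` spellings normalize to the same list;
-# the plugin names are hypothetical.
-assert _get_plugin_specs_as_list(None) == []
-assert _get_plugin_specs_as_list("plug.a,plug.b") == ["plug.a", "plug.b"]
-assert _get_plugin_specs_as_list(["plug.a", "plug.b"]) == ["plug.a", "plug.b"]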
-
-def _ensure_removed_sysmodule(modname: str) -> None:
- try:
- del sys.modules[modname]
- except KeyError:
- pass
-
-
-class Notset:
- def __repr__(self):
- return "<NOTSET>"
-
-
-notset = Notset()
-
-
-def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
- """Given an iterable of file names in a source distribution, return the "names" that should
- be marked for assertion rewrite.
-
- For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in
- the assertion rewrite mechanism.
-
- This function has to deal with dist-info based distributions and egg based distributions
- (which are still very much in use for "editable" installs).
-
- Here are the file names as seen in a dist-info based distribution:
-
- pytest_mock/__init__.py
- pytest_mock/_version.py
- pytest_mock/plugin.py
- pytest_mock.egg-info/PKG-INFO
-
- Here are the file names as seen in an egg based distribution:
-
- src/pytest_mock/__init__.py
- src/pytest_mock/_version.py
- src/pytest_mock/plugin.py
- src/pytest_mock.egg-info/PKG-INFO
- LICENSE
- setup.py
-
- We have to take into account these two distribution flavors in order to determine which
- names should be considered for assertion rewriting.
-
- More information:
- https://github.com/pytest-dev/pytest-mock/issues/167
- """
- package_files = list(package_files)
- seen_some = False
- for fn in package_files:
- is_simple_module = "/" not in fn and fn.endswith(".py")
- is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
- if is_simple_module:
- module_name, _ = os.path.splitext(fn)
- # we ignore "setup.py" at the root of the distribution
- if module_name != "setup":
- seen_some = True
- yield module_name
- elif is_package:
- package_name = os.path.dirname(fn)
- seen_some = True
- yield package_name
-
- if not seen_some:
- # At this point we did not find any packages or modules suitable for assertion
- # rewriting, so we try again by stripping the first path component (to account for
- # "src" based source trees for example).
- # This approach lets us have the common case continue to be fast, as egg-distributions
- # are rarer.
- new_package_files = []
- for fn in package_files:
- parts = fn.split("/")
- new_fn = "/".join(parts[1:])
- if new_fn:
- new_package_files.append(new_fn)
- if new_package_files:
- yield from _iter_rewritable_modules(new_package_files)
-
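-# -- Editor's illustrative sketch (not part of the original file) ------------
-# For the dist-info layout shown in the docstring above, only the package
-# name is yielded for assertion rewriting.
-assert list(
-    _iter_rewritable_modules(
-        [
-            "pytest_mock/__init__.py",
-            "pytest_mock/_version.py",
-            "pytest_mock/plugin.py",
-            "pytest_mock.egg-info/PKG-INFO",
-        ]
-    )
-) == ["pytest_mock"]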
-
-def _args_converter(args: Iterable[str]) -> Tuple[str, ...]:
- return tuple(args)
-
-
-@final
-class Config:
- """Access to configuration values, pluginmanager and plugin hooks.
-
- :param PytestPluginManager pluginmanager:
- A pytest PluginManager.
-
- :param InvocationParams invocation_params:
- Object containing parameters regarding the :func:`pytest.main`
- invocation.
- """
-
- @final
- @attr.s(frozen=True, auto_attribs=True)
- class InvocationParams:
- """Holds parameters passed during :func:`pytest.main`.
-
- The object attributes are read-only.
-
- .. versionadded:: 5.1
-
- .. note::
-
- Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts``
- ini option are handled by pytest and are not included in the ``args`` attribute.
-
- Plugins accessing ``InvocationParams`` must be aware of that.
- """
-
- args: Tuple[str, ...] = attr.ib(converter=_args_converter)
- """The command-line arguments as passed to :func:`pytest.main`."""
- plugins: Optional[Sequence[Union[str, _PluggyPlugin]]]
- """Extra plugins, might be `None`."""
- dir: Path
- """The directory from which :func:`pytest.main` was invoked."""
-
- def __init__(
- self,
- pluginmanager: PytestPluginManager,
- *,
- invocation_params: Optional[InvocationParams] = None,
- ) -> None:
- from .argparsing import Parser, FILE_OR_DIR
-
- if invocation_params is None:
- invocation_params = self.InvocationParams(
- args=(), plugins=None, dir=Path.cwd()
- )
-
- self.option = argparse.Namespace()
- """Access to command line option as attributes.
-
- :type: argparse.Namespace
- """
-
- self.invocation_params = invocation_params
- """The parameters with which pytest was invoked.
-
- :type: InvocationParams
- """
-
- _a = FILE_OR_DIR
- self._parser = Parser(
- usage=f"%(prog)s [options] [{_a}] [{_a}] [...]",
- processopt=self._processopt,
- _ispytest=True,
- )
- self.pluginmanager = pluginmanager
- """The plugin manager handles plugin registration and hook invocation.
-
- :type: PytestPluginManager
- """
-
- self.stash = Stash()
- """A place where plugins can store information on the config for their
- own use.
-
- :type: Stash
- """
- # Deprecated alias. Was never public. Can be removed in a few releases.
- self._store = self.stash
-
- from .compat import PathAwareHookProxy
-
- self.trace = self.pluginmanager.trace.root.get("config")
- self.hook = PathAwareHookProxy(self.pluginmanager.hook)
- self._inicache: Dict[str, Any] = {}
- self._override_ini: Sequence[str] = ()
- self._opt2dest: Dict[str, str] = {}
- self._cleanup: List[Callable[[], None]] = []
- self.pluginmanager.register(self, "pytestconfig")
- self._configured = False
- self.hook.pytest_addoption.call_historic(
- kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
- )
-
- if TYPE_CHECKING:
- from _pytest.cacheprovider import Cache
-
- self.cache: Optional[Cache] = None
-
- @property
- def rootpath(self) -> Path:
- """The path to the :ref:`rootdir <rootdir>`.
-
- :type: pathlib.Path
-
- .. versionadded:: 6.1
- """
- return self._rootpath
-
- @property
- def inipath(self) -> Optional[Path]:
- """The path to the :ref:`configfile <configfiles>`.
-
- :type: Optional[pathlib.Path]
-
- .. versionadded:: 6.1
- """
- return self._inipath
-
- def add_cleanup(self, func: Callable[[], None]) -> None:
- """Add a function to be called when the config object gets out of
- use (usually coinciding with pytest_unconfigure)."""
- self._cleanup.append(func)
-
- def _do_configure(self) -> None:
- assert not self._configured
- self._configured = True
- with warnings.catch_warnings():
- warnings.simplefilter("default")
- self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
-
- def _ensure_unconfigure(self) -> None:
- if self._configured:
- self._configured = False
- self.hook.pytest_unconfigure(config=self)
- self.hook.pytest_configure._call_history = []
- while self._cleanup:
- fin = self._cleanup.pop()
- fin()
-
- def get_terminal_writer(self) -> TerminalWriter:
- terminalreporter: TerminalReporter = self.pluginmanager.get_plugin(
- "terminalreporter"
- )
- return terminalreporter._tw
-
- def pytest_cmdline_parse(
- self, pluginmanager: PytestPluginManager, args: List[str]
- ) -> "Config":
- try:
- self.parse(args)
- except UsageError:
-
- # Handle --version and --help here in a minimal fashion.
- # This gets done via helpconfig normally, but its
- # pytest_cmdline_main is not called in case of errors.
- if getattr(self.option, "version", False) or "--version" in args:
- from _pytest.helpconfig import showversion
-
- showversion(self)
- elif (
- getattr(self.option, "help", False) or "--help" in args or "-h" in args
- ):
- self._parser._getparser().print_help()
- sys.stdout.write(
- "\nNOTE: displaying only minimal help due to UsageError.\n\n"
- )
-
- raise
-
- return self
-
- def notify_exception(
- self,
- excinfo: ExceptionInfo[BaseException],
- option: Optional[argparse.Namespace] = None,
- ) -> None:
- if option and getattr(option, "fulltrace", False):
- style: _TracebackStyle = "long"
- else:
- style = "native"
- excrepr = excinfo.getrepr(
- funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
- )
- res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
- if not any(res):
- for line in str(excrepr).split("\n"):
- sys.stderr.write("INTERNALERROR> %s\n" % line)
- sys.stderr.flush()
-
- def cwd_relative_nodeid(self, nodeid: str) -> str:
- # nodeid's are relative to the rootpath, compute relative to cwd.
- if self.invocation_params.dir != self.rootpath:
- fullpath = self.rootpath / nodeid
- nodeid = bestrelpath(self.invocation_params.dir, fullpath)
- return nodeid
-
- @classmethod
- def fromdictargs(cls, option_dict, args) -> "Config":
- """Constructor usable for subprocesses."""
- config = get_config(args)
- config.option.__dict__.update(option_dict)
- config.parse(args, addopts=False)
- for x in config.option.plugins:
- config.pluginmanager.consider_pluginarg(x)
- return config
-
- def _processopt(self, opt: "Argument") -> None:
- for name in opt._short_opts + opt._long_opts:
- self._opt2dest[name] = opt.dest
-
- if hasattr(opt, "default"):
- if not hasattr(self.option, opt.dest):
- setattr(self.option, opt.dest, opt.default)
-
- @hookimpl(trylast=True)
- def pytest_load_initial_conftests(self, early_config: "Config") -> None:
- self.pluginmanager._set_initial_conftests(
- early_config.known_args_namespace, rootpath=early_config.rootpath
- )
-
- def _initini(self, args: Sequence[str]) -> None:
- ns, unknown_args = self._parser.parse_known_and_unknown_args(
- args, namespace=copy.copy(self.option)
- )
- rootpath, inipath, inicfg = determine_setup(
- ns.inifilename,
- ns.file_or_dir + unknown_args,
- rootdir_cmd_arg=ns.rootdir or None,
- config=self,
- )
- self._rootpath = rootpath
- self._inipath = inipath
- self.inicfg = inicfg
- self._parser.extra_info["rootdir"] = str(self.rootpath)
- self._parser.extra_info["inifile"] = str(self.inipath)
- self._parser.addini("addopts", "extra command line options", "args")
- self._parser.addini("minversion", "minimally required pytest version")
- self._parser.addini(
- "required_plugins",
- "plugins that must be present for pytest to run",
- type="args",
- default=[],
- )
- self._override_ini = ns.override_ini or ()
-
- def _consider_importhook(self, args: Sequence[str]) -> None:
- """Install the PEP 302 import hook if using assertion rewriting.
-
- Needs to parse the --assert=<mode> option from the commandline
- and find all the installed plugins to mark them for rewriting
- by the importhook.
- """
- ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
- mode = getattr(ns, "assertmode", "plain")
- if mode == "rewrite":
- import _pytest.assertion
-
- try:
- hook = _pytest.assertion.install_importhook(self)
- except SystemError:
- mode = "plain"
- else:
- self._mark_plugins_for_rewrite(hook)
- self._warn_about_missing_assertion(mode)
-
- def _mark_plugins_for_rewrite(self, hook) -> None:
- """Given an importhook, mark for rewrite any top-level
- modules or packages in the distribution package for
- all pytest plugins."""
- self.pluginmanager.rewrite_hook = hook
-
- if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
- # We don't autoload from setuptools entry points, no need to continue.
- return
-
- package_files = (
- str(file)
- for dist in importlib_metadata.distributions()
- if any(ep.group == "pytest11" for ep in dist.entry_points)
- for file in dist.files or []
- )
-
- for name in _iter_rewritable_modules(package_files):
- hook.mark_rewrite(name)
-
- def _validate_args(self, args: List[str], via: str) -> List[str]:
- """Validate known args."""
- self._parser._config_source_hint = via # type: ignore
- try:
- self._parser.parse_known_and_unknown_args(
- args, namespace=copy.copy(self.option)
- )
- finally:
- del self._parser._config_source_hint # type: ignore
-
- return args
-
- def _preparse(self, args: List[str], addopts: bool = True) -> None:
- if addopts:
- env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
- if len(env_addopts):
- args[:] = (
- self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
- + args
- )
- self._initini(args)
- if addopts:
- args[:] = (
- self._validate_args(self.getini("addopts"), "via addopts config") + args
- )
-
- self.known_args_namespace = self._parser.parse_known_args(
- args, namespace=copy.copy(self.option)
- )
- self._checkversion()
- self._consider_importhook(args)
- self.pluginmanager.consider_preparse(args, exclude_only=False)
- if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
- # Don't autoload from setuptools entry point. Only explicitly specified
- # plugins are going to be loaded.
- self.pluginmanager.load_setuptools_entrypoints("pytest11")
- self.pluginmanager.consider_env()
-
- self.known_args_namespace = self._parser.parse_known_args(
- args, namespace=copy.copy(self.known_args_namespace)
- )
-
- self._validate_plugins()
- self._warn_about_skipped_plugins()
-
- if self.known_args_namespace.strict:
- self.issue_config_time_warning(
- _pytest.deprecated.STRICT_OPTION, stacklevel=2
- )
-
- if self.known_args_namespace.confcutdir is None and self.inipath is not None:
- confcutdir = str(self.inipath.parent)
- self.known_args_namespace.confcutdir = confcutdir
- try:
- self.hook.pytest_load_initial_conftests(
- early_config=self, args=args, parser=self._parser
- )
- except ConftestImportFailure as e:
- if self.known_args_namespace.help or self.known_args_namespace.version:
- # we don't want to prevent --help/--version from working,
- # so just let it pass and print a warning at the end
- self.issue_config_time_warning(
- PytestConfigWarning(f"could not load initial conftests: {e.path}"),
- stacklevel=2,
- )
- else:
- raise
-
- @hookimpl(hookwrapper=True)
- def pytest_collection(self) -> Generator[None, None, None]:
- # Check for unknown ini keys after collection is done, so we take into account
- # options added by late-loading conftest files.
- yield
- self._validate_config_options()
-
- def _checkversion(self) -> None:
- import pytest
-
- minver = self.inicfg.get("minversion", None)
- if minver:
- # Imported lazily to improve start-up time.
- from packaging.version import Version
-
- if not isinstance(minver, str):
- raise pytest.UsageError(
- "%s: 'minversion' must be a single value" % self.inipath
- )
-
- if Version(minver) > Version(pytest.__version__):
- raise pytest.UsageError(
- "%s: 'minversion' requires pytest-%s, actual pytest-%s"
- % (
- self.inipath,
- minver,
- pytest.__version__,
- )
- )
-
- def _validate_config_options(self) -> None:
- for key in sorted(self._get_unknown_ini_keys()):
- self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
-
- def _validate_plugins(self) -> None:
- required_plugins = sorted(self.getini("required_plugins"))
- if not required_plugins:
- return
-
- # Imported lazily to improve start-up time.
- from packaging.version import Version
- from packaging.requirements import InvalidRequirement, Requirement
-
- plugin_info = self.pluginmanager.list_plugin_distinfo()
- plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}
-
- missing_plugins = []
- for required_plugin in required_plugins:
- try:
- req = Requirement(required_plugin)
- except InvalidRequirement:
- missing_plugins.append(required_plugin)
- continue
-
- if req.name not in plugin_dist_info:
- missing_plugins.append(required_plugin)
- elif not req.specifier.contains(
- Version(plugin_dist_info[req.name]), prereleases=True
- ):
- missing_plugins.append(required_plugin)
-
- if missing_plugins:
- raise UsageError(
- "Missing required plugins: {}".format(", ".join(missing_plugins)),
- )
-
- def _warn_or_fail_if_strict(self, message: str) -> None:
- if self.known_args_namespace.strict_config:
- raise UsageError(message)
-
- self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
-
- def _get_unknown_ini_keys(self) -> List[str]:
- parser_inicfg = self._parser._inidict
- return [name for name in self.inicfg if name not in parser_inicfg]
-
- def parse(self, args: List[str], addopts: bool = True) -> None:
- # Parse given cmdline arguments into this config object.
- assert not hasattr(
- self, "args"
- ), "can only parse cmdline args at most once per Config object"
- self.hook.pytest_addhooks.call_historic(
- kwargs=dict(pluginmanager=self.pluginmanager)
- )
- self._preparse(args, addopts=addopts)
- # XXX deprecated hook:
- self.hook.pytest_cmdline_preparse(config=self, args=args)
- self._parser.after_preparse = True # type: ignore
- try:
- args = self._parser.parse_setoption(
- args, self.option, namespace=self.option
- )
- if not args:
- if self.invocation_params.dir == self.rootpath:
- args = self.getini("testpaths")
- if not args:
- args = [str(self.invocation_params.dir)]
- self.args = args
- except PrintHelp:
- pass
-
- def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
- """Issue and handle a warning during the "configure" stage.
-
- During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item``
- function because it is not possible to have hookwrappers around ``pytest_configure``.
-
- This function is mainly intended for plugins that need to issue warnings during
- ``pytest_configure`` (or similar stages).
-
- :param warning: The warning instance.
- :param stacklevel: stacklevel forwarded to warnings.warn.
- """
- if self.pluginmanager.is_blocked("warnings"):
- return
-
- cmdline_filters = self.known_args_namespace.pythonwarnings or []
- config_filters = self.getini("filterwarnings")
-
- with warnings.catch_warnings(record=True) as records:
- warnings.simplefilter("always", type(warning))
- apply_warning_filters(config_filters, cmdline_filters)
- warnings.warn(warning, stacklevel=stacklevel)
-
- if records:
- frame = sys._getframe(stacklevel - 1)
- location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
- self.hook.pytest_warning_recorded.call_historic(
- kwargs=dict(
- warning_message=records[0],
- when="config",
- nodeid="",
- location=location,
- )
- )
-
- def addinivalue_line(self, name: str, line: str) -> None:
- """Add a line to an ini-file option. The option must have been
- declared but might not yet be set, in which case the line becomes
- the first line in its value."""
- x = self.getini(name)
- assert isinstance(x, list)
- x.append(line) # modifies the cached list inline
-
- def getini(self, name: str):
- """Return configuration value from an :ref:`ini file <configfiles>`.
-
- If the specified name hasn't been registered through a prior
- :func:`parser.addini <pytest.Parser.addini>` call (usually from a
- plugin), a ValueError is raised.
- """
- try:
- return self._inicache[name]
- except KeyError:
- self._inicache[name] = val = self._getini(name)
- return val
-
- # Meant for easy monkeypatching by legacypath plugin.
- # Can be inlined back (with no cover removed) once legacypath is gone.
- def _getini_unknown_type(self, name: str, type: str, value: Union[str, List[str]]):
- msg = f"unknown configuration type: {type}"
- raise ValueError(msg, value) # pragma: no cover
-
- def _getini(self, name: str):
- try:
- description, type, default = self._parser._inidict[name]
- except KeyError as e:
- raise ValueError(f"unknown configuration value: {name!r}") from e
- override_value = self._get_override_ini_value(name)
- if override_value is None:
- try:
- value = self.inicfg[name]
- except KeyError:
- if default is not None:
- return default
- if type is None:
- return ""
- return []
- else:
- value = override_value
- # Coerce the values based on types.
- #
- # Note: some coercions are only required if we are reading from .ini files, because
- # the file format doesn't contain type information, but when reading from toml we will
- # get either str or list of str values (see _parse_ini_config_from_pyproject_toml).
- # For example:
- #
- # ini:
- # a_line_list = "tests acceptance"
- # in this case, we need to split the string to obtain a list of strings.
- #
- # toml:
- # a_line_list = ["tests", "acceptance"]
- # in this case, we already have a list ready to use.
- #
- if type == "paths":
- # TODO: This assert is probably not valid in all cases.
- assert self.inipath is not None
- dp = self.inipath.parent
- input_values = shlex.split(value) if isinstance(value, str) else value
- return [dp / x for x in input_values]
- elif type == "args":
- return shlex.split(value) if isinstance(value, str) else value
- elif type == "linelist":
- if isinstance(value, str):
- return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
- else:
- return value
- elif type == "bool":
- return _strtobool(str(value).strip())
- elif type == "string":
- return value
- elif type is None:
- return value
- else:
- return self._getini_unknown_type(name, type, value)
-
- def _getconftest_pathlist(
- self, name: str, path: Path, rootpath: Path
- ) -> Optional[List[Path]]:
- try:
- mod, relroots = self.pluginmanager._rget_with_confmod(
- name, path, self.getoption("importmode"), rootpath
- )
- except KeyError:
- return None
- assert mod.__file__ is not None
- modpath = Path(mod.__file__).parent
- values: List[Path] = []
- for relroot in relroots:
- if isinstance(relroot, os.PathLike):
- relroot = Path(relroot)
- else:
- relroot = relroot.replace("/", os.sep)
- relroot = absolutepath(modpath / relroot)
- values.append(relroot)
- return values
-
- def _get_override_ini_value(self, name: str) -> Optional[str]:
- value = None
- # override_ini is a list of "ini=value" options.
- # Always use the last item if multiple values are set for same ini-name,
- # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2.
- for ini_config in self._override_ini:
- try:
- key, user_ini_value = ini_config.split("=", 1)
- except ValueError as e:
- raise UsageError(
- "-o/--override-ini expects option=value style (got: {!r}).".format(
- ini_config
- )
- ) from e
- else:
- if key == name:
- value = user_ini_value
- return value
-
- def getoption(self, name: str, default=notset, skip: bool = False):
- """Return command line option value.
-
- :param name: Name of the option. You may also specify
- the literal ``--OPT`` option instead of the "dest" option name.
- :param default: Default value if no option of that name exists.
- :param skip: If True, raise pytest.skip if option does not exist
- or has a None value.
- """
- name = self._opt2dest.get(name, name)
- try:
- val = getattr(self.option, name)
- if val is None and skip:
- raise AttributeError(name)
- return val
- except AttributeError as e:
- if default is not notset:
- return default
- if skip:
- import pytest
-
- pytest.skip(f"no {name!r} option found")
- raise ValueError(f"no option named {name!r}") from e
-
- def getvalue(self, name: str, path=None):
- """Deprecated, use getoption() instead."""
- return self.getoption(name)
-
- def getvalueorskip(self, name: str, path=None):
- """Deprecated, use getoption(skip=True) instead."""
- return self.getoption(name, skip=True)
-
- def _warn_about_missing_assertion(self, mode: str) -> None:
- if not _assertion_supported():
- if mode == "plain":
- warning_text = (
- "ASSERTIONS ARE NOT EXECUTED"
- " and FAILING TESTS WILL PASS. Are you"
- " using python -O?"
- )
- else:
- warning_text = (
- "assertions not in test modules or"
- " plugins will be ignored"
- " because assert statements are not executed "
- "by the underlying Python interpreter "
- "(are you using python -O?)\n"
- )
- self.issue_config_time_warning(
- PytestConfigWarning(warning_text),
- stacklevel=3,
- )
-
- def _warn_about_skipped_plugins(self) -> None:
- for module_name, msg in self.pluginmanager.skipped_plugins:
- self.issue_config_time_warning(
- PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"),
- stacklevel=2,
- )
-
-
-def _assertion_supported() -> bool:
- try:
- assert False
- except AssertionError:
- return True
- else:
- return False # type: ignore[unreachable]
-
-
-def create_terminal_writer(
- config: Config, file: Optional[TextIO] = None
-) -> TerminalWriter:
- """Create a TerminalWriter instance configured according to the options
- in the config object.
-
- All code that requires a TerminalWriter object and has access to a
- config object should use this function.
- """
- tw = TerminalWriter(file=file)
-
- if config.option.color == "yes":
- tw.hasmarkup = True
- elif config.option.color == "no":
- tw.hasmarkup = False
-
- if config.option.code_highlight == "yes":
- tw.code_highlight = True
- elif config.option.code_highlight == "no":
- tw.code_highlight = False
-
- return tw
-
-
-def _strtobool(val: str) -> bool:
- """Convert a string representation of truth to True or False.
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
-
- .. note:: Copied from distutils.util.
- """
- val = val.lower()
- if val in ("y", "yes", "t", "true", "on", "1"):
- return True
- elif val in ("n", "no", "f", "false", "off", "0"):
- return False
- else:
- raise ValueError(f"invalid truth value {val!r}")
-
-
-@lru_cache(maxsize=50)
-def parse_warning_filter(
- arg: str, *, escape: bool
-) -> Tuple["warnings._ActionKind", str, Type[Warning], str, int]:
- """Parse a warnings filter string.
-
- This is copied from warnings._setoption with the following changes:
-
- * Does not apply the filter.
- * Escaping is optional.
- * Raises UsageError so we get nice error messages on failure.
- """
- __tracebackhide__ = True
- error_template = dedent(
- f"""\
- while parsing the following warning configuration:
-
- {arg}
-
- This error occurred:
-
- {{error}}
- """
- )
-
- parts = arg.split(":")
- if len(parts) > 5:
- doc_url = (
- "https://docs.python.org/3/library/warnings.html#describing-warning-filters"
- )
- error = dedent(
- f"""\
- Too many fields ({len(parts)}), expected at most 5 separated by colons:
-
- action:message:category:module:line
-
- For more information please consult: {doc_url}
- """
- )
- raise UsageError(error_template.format(error=error))
-
- while len(parts) < 5:
- parts.append("")
- action_, message, category_, module, lineno_ = (s.strip() for s in parts)
- try:
- action: "warnings._ActionKind" = warnings._getaction(action_) # type: ignore[attr-defined]
- except warnings._OptionError as e:
- raise UsageError(error_template.format(error=str(e)))
- try:
- category: Type[Warning] = _resolve_warning_category(category_)
- except Exception:
- exc_info = ExceptionInfo.from_current()
- exception_text = exc_info.getrepr(style="native")
- raise UsageError(error_template.format(error=exception_text))
- if message and escape:
- message = re.escape(message)
- if module and escape:
- module = re.escape(module) + r"\Z"
- if lineno_:
- try:
- lineno = int(lineno_)
- if lineno < 0:
- raise ValueError("number is negative")
- except ValueError as e:
- raise UsageError(
- error_template.format(error=f"invalid lineno {lineno_!r}: {e}")
- )
- else:
- lineno = 0
- return action, message, category, module, lineno
-
-
-def _resolve_warning_category(category: str) -> Type[Warning]:
- """
- Copied from warnings._getcategory, but changed so it lets exceptions (especially ImportErrors)
- propagate so we can get access to their tracebacks (#9218).
- """
- __tracebackhide__ = True
- if not category:
- return Warning
-
- if "." not in category:
- import builtins as m
-
- klass = category
- else:
- module, _, klass = category.rpartition(".")
- m = __import__(module, None, None, [klass])
- cat = getattr(m, klass)
- if not issubclass(cat, Warning):
- raise UsageError(f"{cat} is not a Warning subclass")
- return cast(Type[Warning], cat)
-
-
-def apply_warning_filters(
- config_filters: Iterable[str], cmdline_filters: Iterable[str]
-) -> None:
- """Applies pytest-configured filters to the warnings module"""
- # Filters should have this precedence: cmdline options, config.
- # Filters should be applied in the inverse order of precedence.
- for arg in config_filters:
- warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
-
- for arg in cmdline_filters:
- warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
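The removed config/__init__.py above provides the ini/option accessors that plugins build on. A minimal sketch of the typical usage pattern, assuming hypothetical names ("demo_lines", "--demo-flag") that are not part of the removed code:

    # conftest.py sketch -- option names are illustrative only
    def pytest_addoption(parser):
        parser.addini("demo_lines", help="example linelist option", type="linelist", default=[])
        parser.addoption("--demo-flag", action="store_true", dest="demo_flag", help="example flag")

    def pytest_configure(config):
        lines = config.getini("demo_lines")                  # coerced to a list of stripped lines
        flag = config.getoption("demo_flag", default=False)  # dest name or --OPT form both work
        config.addinivalue_line("markers", "demo: example marker")  # appends to a declared linelist

A "-o demo_lines=..." command-line override would take precedence over the ini file via _get_override_ini_value.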
diff --git a/contrib/python/pytest/py3/_pytest/config/argparsing.py b/contrib/python/pytest/py3/_pytest/config/argparsing.py
deleted file mode 100644
index b0bb3f168f..0000000000
--- a/contrib/python/pytest/py3/_pytest/config/argparsing.py
+++ /dev/null
@@ -1,535 +0,0 @@
-import argparse
-import os
-import sys
-import warnings
-from gettext import gettext
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Dict
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-import _pytest._io
-from _pytest.compat import final
-from _pytest.config.exceptions import UsageError
-from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT
-from _pytest.deprecated import ARGUMENT_TYPE_STR
-from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE
-from _pytest.deprecated import check_ispytest
-
-if TYPE_CHECKING:
- from typing import NoReturn
- from typing_extensions import Literal
-
-FILE_OR_DIR = "file_or_dir"
-
-
-@final
-class Parser:
- """Parser for command line arguments and ini-file values.
-
- :ivar extra_info: Dict of generic param -> value to display in case
- there's an error processing the command line arguments.
- """
-
- prog: Optional[str] = None
-
- def __init__(
- self,
- usage: Optional[str] = None,
- processopt: Optional[Callable[["Argument"], None]] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self._anonymous = OptionGroup("custom options", parser=self, _ispytest=True)
- self._groups: List[OptionGroup] = []
- self._processopt = processopt
- self._usage = usage
- self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {}
- self._ininames: List[str] = []
- self.extra_info: Dict[str, Any] = {}
-
- def processoption(self, option: "Argument") -> None:
- if self._processopt:
- if option.dest:
- self._processopt(option)
-
- def getgroup(
- self, name: str, description: str = "", after: Optional[str] = None
- ) -> "OptionGroup":
- """Get (or create) a named option Group.
-
- :name: Name of the option group.
- :description: Long description for --help output.
- :after: Name of another group, used for ordering --help output.
-
- The returned group object has an ``addoption`` method with the same
- signature as :func:`parser.addoption <pytest.Parser.addoption>` but
- will be shown in the respective group in the output of
- ``pytest --help``.
- """
- for group in self._groups:
- if group.name == name:
- return group
- group = OptionGroup(name, description, parser=self, _ispytest=True)
- i = 0
- for i, grp in enumerate(self._groups):
- if grp.name == after:
- break
- self._groups.insert(i + 1, group)
- return group
-
- def addoption(self, *opts: str, **attrs: Any) -> None:
- """Register a command line option.
-
- :opts: Option names, can be short or long options.
- :attrs: Same attributes which the ``add_argument()`` function of the
- `argparse library <https://docs.python.org/library/argparse.html>`_
- accepts.
-
- After command line parsing, options are available on the pytest config
- object via ``config.option.NAME`` where ``NAME`` is usually set
- by passing a ``dest`` attribute, for example
- ``addoption("--long", dest="NAME", ...)``.
- """
- self._anonymous.addoption(*opts, **attrs)
-
- def parse(
- self,
- args: Sequence[Union[str, "os.PathLike[str]"]],
- namespace: Optional[argparse.Namespace] = None,
- ) -> argparse.Namespace:
- from _pytest._argcomplete import try_argcomplete
-
- self.optparser = self._getparser()
- try_argcomplete(self.optparser)
- strargs = [os.fspath(x) for x in args]
- return self.optparser.parse_args(strargs, namespace=namespace)
-
- def _getparser(self) -> "MyOptionParser":
- from _pytest._argcomplete import filescompleter
-
- optparser = MyOptionParser(self, self.extra_info, prog=self.prog)
- groups = self._groups + [self._anonymous]
- for group in groups:
- if group.options:
- desc = group.description or group.name
- arggroup = optparser.add_argument_group(desc)
- for option in group.options:
- n = option.names()
- a = option.attrs()
- arggroup.add_argument(*n, **a)
- file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*")
- # bash like autocompletion for dirs (appending '/')
- # Type ignored because typeshed doesn't know about argcomplete.
- file_or_dir_arg.completer = filescompleter # type: ignore
- return optparser
-
- def parse_setoption(
- self,
- args: Sequence[Union[str, "os.PathLike[str]"]],
- option: argparse.Namespace,
- namespace: Optional[argparse.Namespace] = None,
- ) -> List[str]:
- parsedoption = self.parse(args, namespace=namespace)
- for name, value in parsedoption.__dict__.items():
- setattr(option, name, value)
- return cast(List[str], getattr(parsedoption, FILE_OR_DIR))
-
- def parse_known_args(
- self,
- args: Sequence[Union[str, "os.PathLike[str]"]],
- namespace: Optional[argparse.Namespace] = None,
- ) -> argparse.Namespace:
- """Parse and return a namespace object with known arguments at this point."""
- return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
-
- def parse_known_and_unknown_args(
- self,
- args: Sequence[Union[str, "os.PathLike[str]"]],
- namespace: Optional[argparse.Namespace] = None,
- ) -> Tuple[argparse.Namespace, List[str]]:
- """Parse and return a namespace object with known arguments, and
- the remaining arguments unknown at this point."""
- optparser = self._getparser()
- strargs = [os.fspath(x) for x in args]
- return optparser.parse_known_args(strargs, namespace=namespace)
-
- def addini(
- self,
- name: str,
- help: str,
- type: Optional[
- "Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']"
- ] = None,
- default=None,
- ) -> None:
- """Register an ini-file option.
-
- :name:
- Name of the ini-variable.
- :type:
- Type of the variable. Can be:
-
- * ``string``: a string
- * ``bool``: a boolean
- * ``args``: a list of strings, separated as in a shell
- * ``linelist``: a list of strings, separated by line breaks
- * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell
- * ``pathlist``: a list of ``py.path``, separated as in a shell
-
- .. versionadded:: 7.0
- The ``paths`` variable type.
-
- Defaults to ``string`` if ``None`` or not passed.
- :default:
- Default value if no ini-file option exists but is queried.
-
- The value of ini-variables can be retrieved via a call to
- :py:func:`config.getini(name) <pytest.Config.getini>`.
- """
- assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool")
- self._inidict[name] = (help, type, default)
- self._ininames.append(name)
-
-
-class ArgumentError(Exception):
- """Raised if an Argument instance is created with invalid or
- inconsistent arguments."""
-
- def __init__(self, msg: str, option: Union["Argument", str]) -> None:
- self.msg = msg
- self.option_id = str(option)
-
- def __str__(self) -> str:
- if self.option_id:
- return f"option {self.option_id}: {self.msg}"
- else:
- return self.msg
-
-
-class Argument:
- """Class that mimics the necessary behaviour of optparse.Option.
-
- It's currently a least-effort implementation that ignores choices
- and integer prefixes.
-
- https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
- """
-
- _typ_map = {"int": int, "string": str, "float": float, "complex": complex}
-
- def __init__(self, *names: str, **attrs: Any) -> None:
- """Store parms in private vars for use in add_argument."""
- self._attrs = attrs
- self._short_opts: List[str] = []
- self._long_opts: List[str] = []
- if "%default" in (attrs.get("help") or ""):
- warnings.warn(ARGUMENT_PERCENT_DEFAULT, stacklevel=3)
- try:
- typ = attrs["type"]
- except KeyError:
- pass
- else:
- # This might raise a KeyError as well; we don't want to catch that.
- if isinstance(typ, str):
- if typ == "choice":
- warnings.warn(
- ARGUMENT_TYPE_STR_CHOICE.format(typ=typ, names=names),
- stacklevel=4,
- )
- # argparse expects a type here take it from
- # the type of the first element
- attrs["type"] = type(attrs["choices"][0])
- else:
- warnings.warn(
- ARGUMENT_TYPE_STR.format(typ=typ, names=names), stacklevel=4
- )
- attrs["type"] = Argument._typ_map[typ]
- # Used in test_parseopt -> test_parse_defaultgetter.
- self.type = attrs["type"]
- else:
- self.type = typ
- try:
- # Attribute existence is tested in Config._processopt.
- self.default = attrs["default"]
- except KeyError:
- pass
- self._set_opt_strings(names)
- dest: Optional[str] = attrs.get("dest")
- if dest:
- self.dest = dest
- elif self._long_opts:
- self.dest = self._long_opts[0][2:].replace("-", "_")
- else:
- try:
- self.dest = self._short_opts[0][1:]
- except IndexError as e:
- self.dest = "???" # Needed for the error repr.
- raise ArgumentError("need a long or short option", self) from e
-
- def names(self) -> List[str]:
- return self._short_opts + self._long_opts
-
- def attrs(self) -> Mapping[str, Any]:
- # Update any attributes set by processopt.
- attrs = "default dest help".split()
- attrs.append(self.dest)
- for attr in attrs:
- try:
- self._attrs[attr] = getattr(self, attr)
- except AttributeError:
- pass
- if self._attrs.get("help"):
- a = self._attrs["help"]
- a = a.replace("%default", "%(default)s")
- # a = a.replace('%prog', '%(prog)s')
- self._attrs["help"] = a
- return self._attrs
-
- def _set_opt_strings(self, opts: Sequence[str]) -> None:
- """Directly from optparse.
-
- Might not be necessary as this is passed to argparse later on.
- """
- for opt in opts:
- if len(opt) < 2:
- raise ArgumentError(
- "invalid option string %r: "
- "must be at least two characters long" % opt,
- self,
- )
- elif len(opt) == 2:
- if not (opt[0] == "-" and opt[1] != "-"):
- raise ArgumentError(
- "invalid short option string %r: "
- "must be of the form -x, (x any non-dash char)" % opt,
- self,
- )
- self._short_opts.append(opt)
- else:
- if not (opt[0:2] == "--" and opt[2] != "-"):
- raise ArgumentError(
- "invalid long option string %r: "
- "must start with --, followed by non-dash" % opt,
- self,
- )
- self._long_opts.append(opt)
-
- def __repr__(self) -> str:
- args: List[str] = []
- if self._short_opts:
- args += ["_short_opts: " + repr(self._short_opts)]
- if self._long_opts:
- args += ["_long_opts: " + repr(self._long_opts)]
- args += ["dest: " + repr(self.dest)]
- if hasattr(self, "type"):
- args += ["type: " + repr(self.type)]
- if hasattr(self, "default"):
- args += ["default: " + repr(self.default)]
- return "Argument({})".format(", ".join(args))
-
-
-class OptionGroup:
- """A group of options shown in its own section."""
-
- def __init__(
- self,
- name: str,
- description: str = "",
- parser: Optional[Parser] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self.name = name
- self.description = description
- self.options: List[Argument] = []
- self.parser = parser
-
- def addoption(self, *optnames: str, **attrs: Any) -> None:
- """Add an option to this group.
-
- If a shortened version of a long option is specified, it will
- be suppressed in the help. ``addoption('--twowords', '--two-words')``
- results in help showing ``--two-words`` only, but ``--twowords`` gets
- accepted **and** the automatic destination is in ``args.twowords``.
- """
- conflict = set(optnames).intersection(
- name for opt in self.options for name in opt.names()
- )
- if conflict:
- raise ValueError("option names %s already added" % conflict)
- option = Argument(*optnames, **attrs)
- self._addoption_instance(option, shortupper=False)
-
- def _addoption(self, *optnames: str, **attrs: Any) -> None:
- option = Argument(*optnames, **attrs)
- self._addoption_instance(option, shortupper=True)
-
- def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None:
- if not shortupper:
- for opt in option._short_opts:
- if opt[0] == "-" and opt[1].islower():
- raise ValueError("lowercase shortoptions reserved")
- if self.parser:
- self.parser.processoption(option)
- self.options.append(option)
-
-
-class MyOptionParser(argparse.ArgumentParser):
- def __init__(
- self,
- parser: Parser,
- extra_info: Optional[Dict[str, Any]] = None,
- prog: Optional[str] = None,
- ) -> None:
- self._parser = parser
- super().__init__(
- prog=prog,
- usage=parser._usage,
- add_help=False,
- formatter_class=DropShorterLongHelpFormatter,
- allow_abbrev=False,
- )
- # extra_info is a dict of (param -> value) to display if there's
- # a usage error, to provide more contextual information to the user.
- self.extra_info = extra_info if extra_info else {}
-
- def error(self, message: str) -> "NoReturn":
- """Transform argparse error message into UsageError."""
- msg = f"{self.prog}: error: {message}"
-
- if hasattr(self._parser, "_config_source_hint"):
- # Type ignored because the attribute is set dynamically.
- msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore
-
- raise UsageError(self.format_usage() + msg)
-
- # Type ignored because typeshed has a very complex type in the superclass.
- def parse_args( # type: ignore
- self,
- args: Optional[Sequence[str]] = None,
- namespace: Optional[argparse.Namespace] = None,
- ) -> argparse.Namespace:
- """Allow splitting of positional arguments."""
- parsed, unrecognized = self.parse_known_args(args, namespace)
- if unrecognized:
- for arg in unrecognized:
- if arg and arg[0] == "-":
- lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
- for k, v in sorted(self.extra_info.items()):
- lines.append(f" {k}: {v}")
- self.error("\n".join(lines))
- getattr(parsed, FILE_OR_DIR).extend(unrecognized)
- return parsed
-
- if sys.version_info[:2] < (3, 9): # pragma: no cover
- # Backport of https://github.com/python/cpython/pull/14316 so we can
- # disable long --argument abbreviations without breaking short flags.
- def _parse_optional(
- self, arg_string: str
- ) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]:
- if not arg_string:
- return None
- if not arg_string[0] in self.prefix_chars:
- return None
- if arg_string in self._option_string_actions:
- action = self._option_string_actions[arg_string]
- return action, arg_string, None
- if len(arg_string) == 1:
- return None
- if "=" in arg_string:
- option_string, explicit_arg = arg_string.split("=", 1)
- if option_string in self._option_string_actions:
- action = self._option_string_actions[option_string]
- return action, option_string, explicit_arg
- if self.allow_abbrev or not arg_string.startswith("--"):
- option_tuples = self._get_option_tuples(arg_string)
- if len(option_tuples) > 1:
- msg = gettext(
- "ambiguous option: %(option)s could match %(matches)s"
- )
- options = ", ".join(option for _, option, _ in option_tuples)
- self.error(msg % {"option": arg_string, "matches": options})
- elif len(option_tuples) == 1:
- (option_tuple,) = option_tuples
- return option_tuple
- if self._negative_number_matcher.match(arg_string):
- if not self._has_negative_number_optionals:
- return None
- if " " in arg_string:
- return None
- return None, arg_string, None
-
-
-class DropShorterLongHelpFormatter(argparse.HelpFormatter):
- """Shorten help for long options that differ only in extra hyphens.
-
- - Collapse **long** options that are the same except for extra hyphens.
- - Shortcut if there are only two options and one of them is a short one.
- - Cache result on the action object as this is called at least 2 times.
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- # Use more accurate terminal width.
- if "width" not in kwargs:
- kwargs["width"] = _pytest._io.get_terminal_width()
- super().__init__(*args, **kwargs)
-
- def _format_action_invocation(self, action: argparse.Action) -> str:
- orgstr = super()._format_action_invocation(action)
- if orgstr and orgstr[0] != "-": # only optional arguments
- return orgstr
- res: Optional[str] = getattr(action, "_formatted_action_invocation", None)
- if res:
- return res
- options = orgstr.split(", ")
- if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
- # a shortcut for '-h, --help' or '--abc', '-a'
- action._formatted_action_invocation = orgstr # type: ignore
- return orgstr
- return_list = []
- short_long: Dict[str, str] = {}
- for option in options:
- if len(option) == 2 or option[2] == " ":
- continue
- if not option.startswith("--"):
- raise ArgumentError(
- 'long optional argument without "--": [%s]' % (option), option
- )
- xxoption = option[2:]
- shortened = xxoption.replace("-", "")
- if shortened not in short_long or len(short_long[shortened]) < len(
- xxoption
- ):
- short_long[shortened] = xxoption
- # now short_long has been filled out to the longest with dashes
- # **and** we keep the right option ordering from add_argument
- for option in options:
- if len(option) == 2 or option[2] == " ":
- return_list.append(option)
- if option[2:] == short_long.get(option.replace("-", "")):
- return_list.append(option.replace(" ", "=", 1))
- formatted_action_invocation = ", ".join(return_list)
- action._formatted_action_invocation = formatted_action_invocation # type: ignore
- return formatted_action_invocation
-
- def _split_lines(self, text, width):
- """Wrap lines after splitting on original newlines.
-
- This allows explicit line breaks in the help text.
- """
- import textwrap
-
- lines = []
- for line in text.splitlines():
- lines.extend(textwrap.wrap(line.strip(), width))
- return lines
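A brief sketch of the grouping and ini-type registration that argparsing.py above implements; the group and option names are examples, not pytest built-ins:

    def pytest_addoption(parser):
        group = parser.getgroup("demo", description="demo options")
        # '--two-words' is what --help shows; '--twowords' is still accepted and
        # both map to config.option.twowords (see DropShorterLongHelpFormatter).
        group.addoption("--twowords", "--two-words", action="store", default="x")
        # 'paths' values are split shell-style and resolved relative to the inifile's directory.
        parser.addini("demo_paths", help="example 'paths' option", type="paths", default=[])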
diff --git a/contrib/python/pytest/py3/_pytest/config/compat.py b/contrib/python/pytest/py3/_pytest/config/compat.py
deleted file mode 100644
index ba267d2150..0000000000
--- a/contrib/python/pytest/py3/_pytest/config/compat.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import functools
-import warnings
-from pathlib import Path
-from typing import Optional
-
-from ..compat import LEGACY_PATH
-from ..compat import legacy_path
-from ..deprecated import HOOK_LEGACY_PATH_ARG
-from _pytest.nodes import _check_path
-
-# hookname: (Path, LEGACY_PATH)
-imply_paths_hooks = {
- "pytest_ignore_collect": ("collection_path", "path"),
- "pytest_collect_file": ("file_path", "path"),
- "pytest_pycollect_makemodule": ("module_path", "path"),
- "pytest_report_header": ("start_path", "startdir"),
- "pytest_report_collectionfinish": ("start_path", "startdir"),
-}
-
-
-class PathAwareHookProxy:
- """
- This helper wraps hook callers; until pluggy supports fixing calls,
- this one will do.
-
- It currently doesn't return full hook caller proxies for fixed hooks;
- this may have to be changed later depending on bugs.
- """
-
- def __init__(self, hook_caller):
- self.__hook_caller = hook_caller
-
- def __dir__(self):
- return dir(self.__hook_caller)
-
- def __getattr__(self, key, _wraps=functools.wraps):
- hook = getattr(self.__hook_caller, key)
- if key not in imply_paths_hooks:
- self.__dict__[key] = hook
- return hook
- else:
- path_var, fspath_var = imply_paths_hooks[key]
-
- @_wraps(hook)
- def fixed_hook(**kw):
-
- path_value: Optional[Path] = kw.pop(path_var, None)
- fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None)
- if fspath_value is not None:
- warnings.warn(
- HOOK_LEGACY_PATH_ARG.format(
- pylib_path_arg=fspath_var, pathlib_path_arg=path_var
- ),
- stacklevel=2,
- )
- if path_value is not None:
- if fspath_value is not None:
- _check_path(path_value, fspath_value)
- else:
- fspath_value = legacy_path(path_value)
- else:
- assert fspath_value is not None
- path_value = Path(fspath_value)
-
- kw[path_var] = path_value
- kw[fspath_var] = fspath_value
- return hook(**kw)
-
- fixed_hook.__name__ = key
- self.__dict__[key] = fixed_hook
- return fixed_hook
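A sketch of the argument bridging PathAwareHookProxy performs for the hooks listed in imply_paths_hooks; the hookimpl below is illustrative:

    # A third-party hookimpl that still accepts the legacy py.path argument.
    # When only file_path (pathlib.Path) is passed by the caller, the proxy
    # derives the matching 'path' (py.path.local) value before dispatching,
    # and warns with HOOK_LEGACY_PATH_ARG when the legacy value is passed in.
    def pytest_collect_file(file_path, path, parent):
        if file_path.suffix == ".demo":   # hypothetical extension
            pass                          # a real plugin would return a collector here
        return None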
diff --git a/contrib/python/pytest/py3/_pytest/config/exceptions.py b/contrib/python/pytest/py3/_pytest/config/exceptions.py
deleted file mode 100644
index 4f1320e758..0000000000
--- a/contrib/python/pytest/py3/_pytest/config/exceptions.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from _pytest.compat import final
-
-
-@final
-class UsageError(Exception):
- """Error in pytest usage or invocation."""
-
-
-class PrintHelp(Exception):
- """Raised when pytest should print its help to skip the rest of the
- argument parsing and validation."""
diff --git a/contrib/python/pytest/py3/_pytest/config/findpaths.py b/contrib/python/pytest/py3/_pytest/config/findpaths.py
deleted file mode 100644
index c082e652d9..0000000000
--- a/contrib/python/pytest/py3/_pytest/config/findpaths.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import os
-from pathlib import Path
-from typing import Dict
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import Sequence
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-import iniconfig
-
-from .exceptions import UsageError
-from _pytest.outcomes import fail
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import commonpath
-
-if TYPE_CHECKING:
- from . import Config
-
-
-def _parse_ini_config(path: Path) -> iniconfig.IniConfig:
- """Parse the given generic '.ini' file using legacy IniConfig parser, returning
- the parsed object.
-
- Raise UsageError if the file cannot be parsed.
- """
- try:
- return iniconfig.IniConfig(str(path))
- except iniconfig.ParseError as exc:
- raise UsageError(str(exc)) from exc
-
-
-def load_config_dict_from_file(
- filepath: Path,
-) -> Optional[Dict[str, Union[str, List[str]]]]:
- """Load pytest configuration from the given file path, if supported.
-
- Return None if the file does not contain valid pytest configuration.
- """
-
- # Configuration from ini files are obtained from the [pytest] section, if present.
- if filepath.suffix == ".ini":
- iniconfig = _parse_ini_config(filepath)
-
- if "pytest" in iniconfig:
- return dict(iniconfig["pytest"].items())
- else:
- # "pytest.ini" files are always the source of configuration, even if empty.
- if filepath.name == "pytest.ini":
- return {}
-
- # '.cfg' files are considered if they contain a "[tool:pytest]" section.
- elif filepath.suffix == ".cfg":
- iniconfig = _parse_ini_config(filepath)
-
- if "tool:pytest" in iniconfig.sections:
- return dict(iniconfig["tool:pytest"].items())
- elif "pytest" in iniconfig.sections:
- # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate to users that
- # plain "[pytest]" sections in setup.cfg files are no longer supported (#3086).
- fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False)
-
- # '.toml' files are considered if they contain a [tool.pytest.ini_options] table.
- elif filepath.suffix == ".toml":
- import tomli
-
- toml_text = filepath.read_text(encoding="utf-8")
- try:
- config = tomli.loads(toml_text)
- except tomli.TOMLDecodeError as exc:
- raise UsageError(f"{filepath}: {exc}") from exc
-
- result = config.get("tool", {}).get("pytest", {}).get("ini_options", None)
- if result is not None:
- # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc),
- # however we need to convert all scalar values to str for compatibility with the rest
- # of the configuration system, which expects strings only.
- def make_scalar(v: object) -> Union[str, List[str]]:
- return v if isinstance(v, list) else str(v)
-
- return {k: make_scalar(v) for k, v in result.items()}
-
- return None
-
-
-def locate_config(
- args: Iterable[Path],
-) -> Tuple[Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]]]:
- """Search in the list of arguments for a valid ini-file for pytest,
- and return a tuple of (rootdir, inifile, cfg-dict)."""
- config_names = [
- "pytest.ini",
- "pyproject.toml",
- "tox.ini",
- "setup.cfg",
- ]
- args = [x for x in args if not str(x).startswith("-")]
- if not args:
- args = [Path.cwd()]
- for arg in args:
- argpath = absolutepath(arg)
- for base in (argpath, *argpath.parents):
- for config_name in config_names:
- p = base / config_name
- if p.is_file():
- ini_config = load_config_dict_from_file(p)
- if ini_config is not None:
- return base, p, ini_config
- return None, None, {}
-
-
-def get_common_ancestor(paths: Iterable[Path]) -> Path:
- common_ancestor: Optional[Path] = None
- for path in paths:
- if not path.exists():
- continue
- if common_ancestor is None:
- common_ancestor = path
- else:
- if common_ancestor in path.parents or path == common_ancestor:
- continue
- elif path in common_ancestor.parents:
- common_ancestor = path
- else:
- shared = commonpath(path, common_ancestor)
- if shared is not None:
- common_ancestor = shared
- if common_ancestor is None:
- common_ancestor = Path.cwd()
- elif common_ancestor.is_file():
- common_ancestor = common_ancestor.parent
- return common_ancestor
-
-
-def get_dirs_from_args(args: Iterable[str]) -> List[Path]:
- def is_option(x: str) -> bool:
- return x.startswith("-")
-
- def get_file_part_from_node_id(x: str) -> str:
- return x.split("::")[0]
-
- def get_dir_from_path(path: Path) -> Path:
- if path.is_dir():
- return path
- return path.parent
-
- def safe_exists(path: Path) -> bool:
- # This can throw on paths that contain characters unrepresentable at the OS level,
- # or with invalid syntax on Windows (https://bugs.python.org/issue35306)
- try:
- return path.exists()
- except OSError:
- return False
-
- # These look like paths but may not exist
- possible_paths = (
- absolutepath(get_file_part_from_node_id(arg))
- for arg in args
- if not is_option(arg)
- )
-
- return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)]
-
-
-CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
-
-
-def determine_setup(
- inifile: Optional[str],
- args: Sequence[str],
- rootdir_cmd_arg: Optional[str] = None,
- config: Optional["Config"] = None,
-) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
- rootdir = None
- dirs = get_dirs_from_args(args)
- if inifile:
- inipath_ = absolutepath(inifile)
- inipath: Optional[Path] = inipath_
- inicfg = load_config_dict_from_file(inipath_) or {}
- if rootdir_cmd_arg is None:
- rootdir = inipath_.parent
- else:
- ancestor = get_common_ancestor(dirs)
- rootdir, inipath, inicfg = locate_config([ancestor])
- if rootdir is None and rootdir_cmd_arg is None:
- for possible_rootdir in (ancestor, *ancestor.parents):
- if (possible_rootdir / "setup.py").is_file():
- rootdir = possible_rootdir
- break
- else:
- if dirs != [ancestor]:
- rootdir, inipath, inicfg = locate_config(dirs)
- if rootdir is None:
- if config is not None:
- cwd = config.invocation_params.dir
- else:
- cwd = Path.cwd()
- rootdir = get_common_ancestor([cwd, ancestor])
- is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
- if is_fs_root:
- rootdir = ancestor
- if rootdir_cmd_arg:
- rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg))
- if not rootdir.is_dir():
- raise UsageError(
- "Directory '{}' not found. Check your '--rootdir' option.".format(
- rootdir
- )
- )
- assert rootdir is not None
- return rootdir, inipath, inicfg or {}
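A sketch of the rootdir/inifile discovery that findpaths.py above implements; the target path is illustrative and locate_config is internal pytest API:

    from pathlib import Path
    from _pytest.config.findpaths import locate_config

    rootdir, inipath, inicfg = locate_config([Path("tests/unit")])
    # Checks tests/unit and each of its parents for pytest.ini, pyproject.toml
    # (only with a [tool.pytest.ini_options] table), tox.ini and setup.cfg, in
    # that order, and returns (None, None, {}) when nothing matches.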
diff --git a/contrib/python/pytest/py3/_pytest/debugging.py b/contrib/python/pytest/py3/_pytest/debugging.py
deleted file mode 100644
index eb51eddbe4..0000000000
--- a/contrib/python/pytest/py3/_pytest/debugging.py
+++ /dev/null
@@ -1,427 +0,0 @@
-"""Interactive debugging with PDB, the Python Debugger."""
-import argparse
-import functools
-import os
-import sys
-import types
-from typing import Any
-from typing import Callable
-from typing import Generator
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-
-from _pytest import outcomes
-from _pytest._code import ExceptionInfo
-from _pytest.config import Config
-from _pytest.config import ConftestImportFailure
-from _pytest.config import hookimpl
-from _pytest.config import PytestPluginManager
-from _pytest.config.argparsing import Parser
-from _pytest.config.exceptions import UsageError
-from _pytest.nodes import Node
-from _pytest.reports import BaseReport
-
-if TYPE_CHECKING:
- from _pytest.capture import CaptureManager
- from _pytest.runner import CallInfo
-
-
-def import_readline():
- try:
- import readline
- except ImportError:
- sys.path.append('/usr/lib/python2.7/lib-dynload')
-
- try:
- import readline
- except ImportError as e:
- print('can not import readline:', e)
-
- import subprocess
- try:
- subprocess.check_call('stty icrnl'.split())
- except OSError as e:
- print('can not restore Enter, use Control+J:', e)
-
-
-def tty():
- if os.isatty(1):
- return
-
- fd = os.open('/dev/tty', os.O_RDWR)
- os.dup2(fd, 0)
- os.dup2(fd, 1)
- os.dup2(fd, 2)
- os.close(fd)
-
- old_sys_path = sys.path
- sys.path = list(sys.path)
- try:
- import_readline()
- finally:
- sys.path = old_sys_path
-
-
-def _validate_usepdb_cls(value: str) -> Tuple[str, str]:
- """Validate syntax of --pdbcls option."""
- try:
- modname, classname = value.split(":")
- except ValueError as e:
- raise argparse.ArgumentTypeError(
- f"{value!r} is not in the format 'modname:classname'"
- ) from e
- return (modname, classname)
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group._addoption(
- "--pdb",
- dest="usepdb",
- action="store_true",
- help="start the interactive Python debugger on errors or KeyboardInterrupt.",
- )
- group._addoption(
- "--pdbcls",
- dest="usepdb_cls",
- metavar="modulename:classname",
- type=_validate_usepdb_cls,
- help="specify a custom interactive Python debugger for use with --pdb."
- "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
- )
- group._addoption(
- "--trace",
- dest="trace",
- action="store_true",
- help="Immediately break when running each test.",
- )
-
-
-def pytest_configure(config: Config) -> None:
- import pdb
-
- if config.getvalue("trace"):
- config.pluginmanager.register(PdbTrace(), "pdbtrace")
- if config.getvalue("usepdb"):
- config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
-
- pytestPDB._saved.append(
- (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
- )
- pdb.set_trace = pytestPDB.set_trace
- pytestPDB._pluginmanager = config.pluginmanager
- pytestPDB._config = config
-
- # NOTE: not using pytest_unconfigure, since it might get called although
- # pytest_configure was not (if another plugin raises UsageError).
- def fin() -> None:
- (
- pdb.set_trace,
- pytestPDB._pluginmanager,
- pytestPDB._config,
- ) = pytestPDB._saved.pop()
-
- config.add_cleanup(fin)
-
-
-class pytestPDB:
- """Pseudo PDB that defers to the real pdb."""
-
- _pluginmanager: Optional[PytestPluginManager] = None
- _config: Optional[Config] = None
- _saved: List[
- Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]]
- ] = []
- _recursive_debug = 0
- _wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None
-
- @classmethod
- def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]:
- if capman:
- return capman.is_capturing()
- return False
-
- @classmethod
- def _import_pdb_cls(cls, capman: Optional["CaptureManager"]):
- if not cls._config:
- import pdb
-
- # Happens when using pytest.set_trace outside of a test.
- return pdb.Pdb
-
- usepdb_cls = cls._config.getvalue("usepdb_cls")
-
- if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls:
- return cls._wrapped_pdb_cls[1]
-
- if usepdb_cls:
- modname, classname = usepdb_cls
-
- try:
- __import__(modname)
- mod = sys.modules[modname]
-
- # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
- parts = classname.split(".")
- pdb_cls = getattr(mod, parts[0])
- for part in parts[1:]:
- pdb_cls = getattr(pdb_cls, part)
- except Exception as exc:
- value = ":".join((modname, classname))
- raise UsageError(
- f"--pdbcls: could not import {value!r}: {exc}"
- ) from exc
- else:
- import pdb
-
- pdb_cls = pdb.Pdb
-
- wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman)
- cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls)
- return wrapped_cls
-
- @classmethod
- def _get_pdb_wrapper_class(cls, pdb_cls, capman: Optional["CaptureManager"]):
- import _pytest.config
-
- # Type ignored because mypy doesn't support "dynamic"
- # inheritance like this.
- class PytestPdbWrapper(pdb_cls): # type: ignore[valid-type,misc]
- _pytest_capman = capman
- _continued = False
-
- def do_debug(self, arg):
- cls._recursive_debug += 1
- ret = super().do_debug(arg)
- cls._recursive_debug -= 1
- return ret
-
- def do_continue(self, arg):
- ret = super().do_continue(arg)
- if cls._recursive_debug == 0:
- assert cls._config is not None
- tw = _pytest.config.create_terminal_writer(cls._config)
- tw.line()
-
- capman = self._pytest_capman
- capturing = pytestPDB._is_capturing(capman)
- if capturing:
- if capturing == "global":
- tw.sep(">", "PDB continue (IO-capturing resumed)")
- else:
- tw.sep(
- ">",
- "PDB continue (IO-capturing resumed for %s)"
- % capturing,
- )
- assert capman is not None
- capman.resume()
- else:
- tw.sep(">", "PDB continue")
- assert cls._pluginmanager is not None
- cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self)
- self._continued = True
- return ret
-
- do_c = do_cont = do_continue
-
- def do_quit(self, arg):
- """Raise Exit outcome when quit command is used in pdb.
-
- This is a bit of a hack - it would be better if BdbQuit
- could be handled, but this would require wrapping the
- whole pytest run and adjusting the report, etc.
- """
- ret = super().do_quit(arg)
-
- if cls._recursive_debug == 0:
- outcomes.exit("Quitting debugger")
-
- return ret
-
- do_q = do_quit
- do_exit = do_quit
-
- def setup(self, f, tb):
- """Suspend on setup().
-
- Needed after do_continue resumed, and entering another
- breakpoint again.
- """
- ret = super().setup(f, tb)
- if not ret and self._continued:
- # pdb.setup() returns True if the command wants to exit
- # from the interaction: do not suspend capturing then.
- if self._pytest_capman:
- self._pytest_capman.suspend_global_capture(in_=True)
- return ret
-
- def get_stack(self, f, t):
- stack, i = super().get_stack(f, t)
- if f is None:
- # Find last non-hidden frame.
- i = max(0, len(stack) - 1)
- while i and stack[i][0].f_locals.get("__tracebackhide__", False):
- i -= 1
- return stack, i
-
- return PytestPdbWrapper
-
- @classmethod
- def _init_pdb(cls, method, *args, **kwargs):
- """Initialize PDB debugging, dropping any IO capturing."""
- import _pytest.config
-
- if cls._pluginmanager is None:
- capman: Optional[CaptureManager] = None
- else:
- capman = cls._pluginmanager.getplugin("capturemanager")
- if capman:
- capman.suspend(in_=True)
-
- if cls._config:
- tw = _pytest.config.create_terminal_writer(cls._config)
- tw.line()
-
- if cls._recursive_debug == 0:
- # Handle header similar to pdb.set_trace in py37+.
- header = kwargs.pop("header", None)
- if header is not None:
- tw.sep(">", header)
- else:
- capturing = cls._is_capturing(capman)
- if capturing == "global":
- tw.sep(">", f"PDB {method} (IO-capturing turned off)")
- elif capturing:
- tw.sep(
- ">",
- "PDB %s (IO-capturing turned off for %s)"
- % (method, capturing),
- )
- else:
- tw.sep(">", f"PDB {method}")
-
- _pdb = cls._import_pdb_cls(capman)(**kwargs)
-
- if cls._pluginmanager:
- cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
- return _pdb
-
- @classmethod
- def set_trace(cls, *args, **kwargs) -> None:
- """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
- tty()
- frame = sys._getframe().f_back
- _pdb = cls._init_pdb("set_trace", *args, **kwargs)
- _pdb.set_trace(frame)
-
-
-class PdbInvoke:
- def pytest_exception_interact(
- self, node: Node, call: "CallInfo[Any]", report: BaseReport
- ) -> None:
- capman = node.config.pluginmanager.getplugin("capturemanager")
- if capman:
- capman.suspend_global_capture(in_=True)
- out, err = capman.read_global_capture()
- sys.stdout.write(out)
- sys.stdout.write(err)
- tty()
- assert call.excinfo is not None
- _enter_pdb(node, call.excinfo, report)
-
- def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None:
- tb = _postmortem_traceback(excinfo)
- post_mortem(tb)
-
-
-class PdbTrace:
- @hookimpl(hookwrapper=True)
- def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]:
- wrap_pytest_function_for_tracing(pyfuncitem)
- yield
-
-
-def wrap_pytest_function_for_tracing(pyfuncitem):
- """Change the Python function object of the given Function item by a
- wrapper which actually enters pdb before calling the python function
- itself, effectively leaving the user in the pdb prompt in the first
- statement of the function."""
- _pdb = pytestPDB._init_pdb("runcall")
- testfunction = pyfuncitem.obj
-
- # we can't just return `partial(pdb.runcall, testfunction)` because (on
- # python < 3.7.4) runcall's first param is `func`, which means we'd get
- # an exception if one of the kwargs to testfunction was called `func`.
- @functools.wraps(testfunction)
- def wrapper(*args, **kwargs):
- func = functools.partial(testfunction, *args, **kwargs)
- _pdb.runcall(func)
-
- pyfuncitem.obj = wrapper
-
-
-def maybe_wrap_pytest_function_for_tracing(pyfuncitem):
- """Wrap the given pytestfunct item for tracing support if --trace was given in
- the command line."""
- if pyfuncitem.config.getvalue("trace"):
- wrap_pytest_function_for_tracing(pyfuncitem)
-
-
-def _enter_pdb(
- node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport
-) -> BaseReport:
- # XXX we re-use the TerminalReporter's terminalwriter
- # because this seems to avoid some encoding-related troubles,
- # for reasons that are not completely clear.
- tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
- tw.line()
-
- showcapture = node.config.option.showcapture
-
- for sectionname, content in (
- ("stdout", rep.capstdout),
- ("stderr", rep.capstderr),
- ("log", rep.caplog),
- ):
- if showcapture in (sectionname, "all") and content:
- tw.sep(">", "captured " + sectionname)
- if content[-1:] == "\n":
- content = content[:-1]
- tw.line(content)
-
- tw.sep(">", "traceback")
- rep.toterminal(tw)
- tw.sep(">", "entering PDB")
- tb = _postmortem_traceback(excinfo)
- rep._pdbshown = True # type: ignore[attr-defined]
- post_mortem(tb)
- return rep
-
-
-def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType:
- from doctest import UnexpectedException
-
- if isinstance(excinfo.value, UnexpectedException):
- # A doctest.UnexpectedException is not useful for post_mortem.
- # Use the underlying exception instead:
- return excinfo.value.exc_info[2]
- elif isinstance(excinfo.value, ConftestImportFailure):
- # A config.ConftestImportFailure is not useful for post_mortem.
- # Use the underlying exception instead:
- return excinfo.value.excinfo[2]
- else:
- assert excinfo._excinfo is not None
- return excinfo._excinfo[2]
-
-
-def post_mortem(t: types.TracebackType) -> None:
- p = pytestPDB._init_pdb("post_mortem")
- p.reset()
- p.interaction(None, t)
- if p.quitting:
- outcomes.exit("Quitting debugger")
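A short sketch of how a --pdbcls value flows through the removed debugging.py; the IPython class is just the example already used in the option's help text:

    spec = _validate_usepdb_cls("IPython.terminal.debugger:TerminalPdb")
    # -> ("IPython.terminal.debugger", "TerminalPdb")
    # pytestPDB._import_pdb_cls later __import__s the module, walks the attribute
    # path to the class, and wraps it in PytestPdbWrapper so continue/quit
    # cooperate with pytest's capture manager.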
diff --git a/contrib/python/pytest/py3/_pytest/deprecated.py b/contrib/python/pytest/py3/_pytest/deprecated.py
deleted file mode 100644
index f2d79760ae..0000000000
--- a/contrib/python/pytest/py3/_pytest/deprecated.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""Deprecation messages and bits of code used elsewhere in the codebase that
-are planned to be removed in the next pytest release.
-
-Keeping it in a central location makes it easy to track what is deprecated and should
-be removed when the time comes.
-
-All constants defined in this module should be either instances of
-:class:`PytestWarning`, or :class:`UnformattedWarning`
-in case of warnings which need to format their messages.
-"""
-from warnings import warn
-
-from _pytest.warning_types import PytestDeprecationWarning
-from _pytest.warning_types import PytestRemovedIn8Warning
-from _pytest.warning_types import UnformattedWarning
-
-# set of plugins which have been integrated into the core; we use this list to ignore
-# them during registration to avoid conflicts
-DEPRECATED_EXTERNAL_PLUGINS = {
- "pytest_catchlog",
- "pytest_capturelog",
- "pytest_faulthandler",
-}
-
-
-# This can be* removed in pytest 8, but it's harmless and common, so no rush to remove.
-# * If you're in the future: "could have been".
-YIELD_FIXTURE = PytestDeprecationWarning(
- "@pytest.yield_fixture is deprecated.\n"
- "Use @pytest.fixture instead; they are the same."
-)
-
-WARNING_CMDLINE_PREPARSE_HOOK = PytestRemovedIn8Warning(
- "The pytest_cmdline_preparse hook is deprecated and will be removed in a future release. \n"
- "Please use pytest_load_initial_conftests hook instead."
-)
-
-FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestRemovedIn8Warning(
- "The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; "
- "use self.session.gethookproxy() and self.session.isinitpath() instead. "
-)
-
-STRICT_OPTION = PytestRemovedIn8Warning(
- "The --strict option is deprecated, use --strict-markers instead."
-)
-
-# This deprecation is never really meant to be removed.
-PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.")
-
-ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning(
- 'pytest now uses argparse. "%default" should be changed to "%(default)s"',
-)
-
-ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning(
- PytestRemovedIn8Warning,
- "`type` argument to addoption() is the string {typ!r}."
- " For choices this is optional and can be omitted, "
- " but when supplied should be a type (for example `str` or `int`)."
- " (options: {names})",
-)
-
-ARGUMENT_TYPE_STR = UnformattedWarning(
- PytestRemovedIn8Warning,
- "`type` argument to addoption() is the string {typ!r}, "
- " but when supplied should be a type (for example `str` or `int`)."
- " (options: {names})",
-)
-
-
-HOOK_LEGACY_PATH_ARG = UnformattedWarning(
- PytestRemovedIn8Warning,
- "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n"
- "see https://docs.pytest.org/en/latest/deprecations.html"
- "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path",
-)
-
-NODE_CTOR_FSPATH_ARG = UnformattedWarning(
- PytestRemovedIn8Warning,
- "The (fspath: py.path.local) argument to {node_type_name} is deprecated. "
- "Please use the (path: pathlib.Path) argument instead.\n"
- "See https://docs.pytest.org/en/latest/deprecations.html"
- "#fspath-argument-for-node-constructors-replaced-with-pathlib-path",
-)
-
-WARNS_NONE_ARG = PytestRemovedIn8Warning(
- "Passing None has been deprecated.\n"
- "See https://docs.pytest.org/en/latest/how-to/capture-warnings.html"
- "#additional-use-cases-of-warnings-in-tests"
- " for alternatives in common use cases."
-)
-
-KEYWORD_MSG_ARG = UnformattedWarning(
- PytestRemovedIn8Warning,
- "pytest.{func}(msg=...) is now deprecated, use pytest.{func}(reason=...) instead",
-)
-
-INSTANCE_COLLECTOR = PytestRemovedIn8Warning(
- "The pytest.Instance collector type is deprecated and is no longer used. "
- "See https://docs.pytest.org/en/latest/deprecations.html#the-pytest-instance-collector",
-)
-
-# You want to make some `__init__` or function "private".
-#
-# def my_private_function(some, args):
-# ...
-#
-# Do this:
-#
-# def my_private_function(some, args, *, _ispytest: bool = False):
-# check_ispytest(_ispytest)
-# ...
-#
-# Change all internal/allowed calls to
-#
-# my_private_function(some, args, _ispytest=True)
-#
-# All other calls will get the default _ispytest=False and trigger
-# the warning (possibly error in the future).
-
-
-def check_ispytest(ispytest: bool) -> None:
- if not ispytest:
- warn(PRIVATE, stacklevel=3)
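A concrete sketch of the _ispytest pattern described in the closing comment block above; the helper name is hypothetical:

    from _pytest.deprecated import check_ispytest

    def _demo_private_helper(value, *, _ispytest: bool = False):
        check_ispytest(_ispytest)  # warns with PRIVATE unless called with _ispytest=True
        return value

    _demo_private_helper(42, _ispytest=True)  # internal call: no warning
    _demo_private_helper(42)                  # external call: PytestDeprecationWarning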
diff --git a/contrib/python/pytest/py3/_pytest/doctest.py b/contrib/python/pytest/py3/_pytest/doctest.py
deleted file mode 100644
index 7d37be2acc..0000000000
--- a/contrib/python/pytest/py3/_pytest/doctest.py
+++ /dev/null
@@ -1,734 +0,0 @@
-"""Discover and run doctests in modules and test files."""
-import bdb
-import inspect
-import os
-import platform
-import sys
-import traceback
-import types
-import warnings
-from contextlib import contextmanager
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import Dict
-from typing import Generator
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import Pattern
-from typing import Sequence
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-
-import pytest
-from _pytest import outcomes
-from _pytest._code.code import ExceptionInfo
-from _pytest._code.code import ReprFileLocation
-from _pytest._code.code import TerminalRepr
-from _pytest._io import TerminalWriter
-from _pytest.compat import safe_getattr
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
-from _pytest.nodes import Collector
-from _pytest.outcomes import OutcomeException
-from _pytest.pathlib import fnmatch_ex
-from _pytest.pathlib import import_path
-from _pytest.python_api import approx
-from _pytest.warning_types import PytestWarning
-
-if TYPE_CHECKING:
- import doctest
-
-DOCTEST_REPORT_CHOICE_NONE = "none"
-DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
-DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
-DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
-DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
-
-DOCTEST_REPORT_CHOICES = (
- DOCTEST_REPORT_CHOICE_NONE,
- DOCTEST_REPORT_CHOICE_CDIFF,
- DOCTEST_REPORT_CHOICE_NDIFF,
- DOCTEST_REPORT_CHOICE_UDIFF,
- DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
-)
-
-# Lazy definition of runner class
-RUNNER_CLASS = None
-# Lazy definition of output checker class
-CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None
-
-
-def pytest_addoption(parser: Parser) -> None:
- parser.addini(
- "doctest_optionflags",
- "option flags for doctests",
- type="args",
- default=["ELLIPSIS"],
- )
- parser.addini(
- "doctest_encoding", "encoding used for doctest files", default="utf-8"
- )
- group = parser.getgroup("collect")
- group.addoption(
- "--doctest-modules",
- action="store_true",
- default=False,
- help="run doctests in all .py modules",
- dest="doctestmodules",
- )
- group.addoption(
- "--doctest-report",
- type=str.lower,
- default="udiff",
- help="choose another output format for diffs on doctest failure",
- choices=DOCTEST_REPORT_CHOICES,
- dest="doctestreport",
- )
- group.addoption(
- "--doctest-glob",
- action="append",
- default=[],
- metavar="pat",
- help="doctests file matching pattern, default: test*.txt",
- dest="doctestglob",
- )
- group.addoption(
- "--doctest-ignore-import-errors",
- action="store_true",
- default=False,
- help="ignore doctest ImportErrors",
- dest="doctest_ignore_import_errors",
- )
- group.addoption(
- "--doctest-continue-on-failure",
- action="store_true",
- default=False,
- help="for a given doctest, continue to run after the first failure",
- dest="doctest_continue_on_failure",
- )
-
-
-def pytest_unconfigure() -> None:
- global RUNNER_CLASS
-
- RUNNER_CLASS = None
-
-
-def pytest_collect_file(
- file_path: Path,
- parent: Collector,
-) -> Optional[Union["DoctestModule", "DoctestTextfile"]]:
- config = parent.config
- if file_path.suffix == ".py":
- if config.option.doctestmodules and not any(
- (_is_setup_py(file_path), _is_main_py(file_path))
- ):
- mod: DoctestModule = DoctestModule.from_parent(parent, path=file_path)
- return mod
- elif _is_doctest(config, file_path, parent):
- txt: DoctestTextfile = DoctestTextfile.from_parent(parent, path=file_path)
- return txt
- return None
-
-
-def _is_setup_py(path: Path) -> bool:
- if path.name != "setup.py":
- return False
- contents = path.read_bytes()
- return b"setuptools" in contents or b"distutils" in contents
-
-
-def _is_doctest(config: Config, path: Path, parent: Collector) -> bool:
- if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
- return True
- globs = config.getoption("doctestglob") or ["test*.txt"]
- return any(fnmatch_ex(glob, path) for glob in globs)
-
-
-def _is_main_py(path: Path) -> bool:
- return path.name == "__main__.py"
-
-
-class ReprFailDoctest(TerminalRepr):
- def __init__(
- self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]]
- ) -> None:
- self.reprlocation_lines = reprlocation_lines
-
- def toterminal(self, tw: TerminalWriter) -> None:
- for reprlocation, lines in self.reprlocation_lines:
- for line in lines:
- tw.line(line)
- reprlocation.toterminal(tw)
-
-
-class MultipleDoctestFailures(Exception):
- def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None:
- super().__init__()
- self.failures = failures
-
-
-def _init_runner_class() -> Type["doctest.DocTestRunner"]:
- import doctest
-
- class PytestDoctestRunner(doctest.DebugRunner):
- """Runner to collect failures.
-
- Note that the out variable in this case is a list instead of a
- stdout-like object.
- """
-
- def __init__(
- self,
- checker: Optional["doctest.OutputChecker"] = None,
- verbose: Optional[bool] = None,
- optionflags: int = 0,
- continue_on_failure: bool = True,
- ) -> None:
- super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
- self.continue_on_failure = continue_on_failure
-
- def report_failure(
- self,
- out,
- test: "doctest.DocTest",
- example: "doctest.Example",
- got: str,
- ) -> None:
- failure = doctest.DocTestFailure(test, example, got)
- if self.continue_on_failure:
- out.append(failure)
- else:
- raise failure
-
- def report_unexpected_exception(
- self,
- out,
- test: "doctest.DocTest",
- example: "doctest.Example",
- exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType],
- ) -> None:
- if isinstance(exc_info[1], OutcomeException):
- raise exc_info[1]
- if isinstance(exc_info[1], bdb.BdbQuit):
- outcomes.exit("Quitting debugger")
- failure = doctest.UnexpectedException(test, example, exc_info)
- if self.continue_on_failure:
- out.append(failure)
- else:
- raise failure
-
- return PytestDoctestRunner
-
-
-def _get_runner(
- checker: Optional["doctest.OutputChecker"] = None,
- verbose: Optional[bool] = None,
- optionflags: int = 0,
- continue_on_failure: bool = True,
-) -> "doctest.DocTestRunner":
- # We need this in order to do a lazy import on doctest
- global RUNNER_CLASS
- if RUNNER_CLASS is None:
- RUNNER_CLASS = _init_runner_class()
- # Type ignored because the continue_on_failure argument is only defined on
- # PytestDoctestRunner, which is lazily defined so can't be used as a type.
- return RUNNER_CLASS( # type: ignore
- checker=checker,
- verbose=verbose,
- optionflags=optionflags,
- continue_on_failure=continue_on_failure,
- )
-
-
-class DoctestItem(pytest.Item):
- def __init__(
- self,
- name: str,
- parent: "Union[DoctestTextfile, DoctestModule]",
- runner: Optional["doctest.DocTestRunner"] = None,
- dtest: Optional["doctest.DocTest"] = None,
- ) -> None:
- super().__init__(name, parent)
- self.runner = runner
- self.dtest = dtest
- self.obj = None
- self.fixture_request: Optional[FixtureRequest] = None
-
- @classmethod
- def from_parent( # type: ignore
- cls,
- parent: "Union[DoctestTextfile, DoctestModule]",
- *,
- name: str,
- runner: "doctest.DocTestRunner",
- dtest: "doctest.DocTest",
- ):
- # incompatible signature due to imposed limits on subclass
- """The public named constructor."""
- return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
-
- def setup(self) -> None:
- if self.dtest is not None:
- self.fixture_request = _setup_fixtures(self)
- globs = dict(getfixture=self.fixture_request.getfixturevalue)
- for name, value in self.fixture_request.getfixturevalue(
- "doctest_namespace"
- ).items():
- globs[name] = value
- self.dtest.globs.update(globs)
-
- def runtest(self) -> None:
- assert self.dtest is not None
- assert self.runner is not None
- _check_all_skipped(self.dtest)
- self._disable_output_capturing_for_darwin()
- failures: List["doctest.DocTestFailure"] = []
- # Type ignored because we change the type of `out` from what
- # doctest expects.
- self.runner.run(self.dtest, out=failures) # type: ignore[arg-type]
- if failures:
- raise MultipleDoctestFailures(failures)
-
- def _disable_output_capturing_for_darwin(self) -> None:
- """Disable output capturing. Otherwise, stdout is lost to doctest (#985)."""
- if platform.system() != "Darwin":
- return
- capman = self.config.pluginmanager.getplugin("capturemanager")
- if capman:
- capman.suspend_global_capture(in_=True)
- out, err = capman.read_global_capture()
- sys.stdout.write(out)
- sys.stderr.write(err)
-
- # TODO: Type ignored -- breaks Liskov Substitution.
- def repr_failure( # type: ignore[override]
- self,
- excinfo: ExceptionInfo[BaseException],
- ) -> Union[str, TerminalRepr]:
- import doctest
-
- failures: Optional[
- Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]
- ] = None
- if isinstance(
- excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
- ):
- failures = [excinfo.value]
- elif isinstance(excinfo.value, MultipleDoctestFailures):
- failures = excinfo.value.failures
-
- if failures is None:
- return super().repr_failure(excinfo)
-
- reprlocation_lines = []
- for failure in failures:
- example = failure.example
- test = failure.test
- filename = test.filename
- if test.lineno is None:
- lineno = None
- else:
- lineno = test.lineno + example.lineno + 1
- message = type(failure).__name__
- # TODO: ReprFileLocation doesn't expect a None lineno.
- reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type]
- checker = _get_checker()
- report_choice = _get_report_choice(self.config.getoption("doctestreport"))
- if lineno is not None:
- assert failure.test.docstring is not None
- lines = failure.test.docstring.splitlines(False)
- # add line numbers to the left of the error message
- assert test.lineno is not None
- lines = [
- "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)
- ]
- # trim docstring error lines to 10
- lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
- else:
- lines = [
- "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
- ]
- indent = ">>>"
- for line in example.source.splitlines():
- lines.append(f"??? {indent} {line}")
- indent = "..."
- if isinstance(failure, doctest.DocTestFailure):
- lines += checker.output_difference(
- example, failure.got, report_choice
- ).split("\n")
- else:
- inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
- lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
- lines += [
- x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
- ]
- reprlocation_lines.append((reprlocation, lines))
- return ReprFailDoctest(reprlocation_lines)
-
- def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
- assert self.dtest is not None
- return self.path, self.dtest.lineno, "[doctest] %s" % self.name
-
-
-def _get_flag_lookup() -> Dict[str, int]:
- import doctest
-
- return dict(
- DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
- DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
- NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
- ELLIPSIS=doctest.ELLIPSIS,
- IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
- COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
- ALLOW_UNICODE=_get_allow_unicode_flag(),
- ALLOW_BYTES=_get_allow_bytes_flag(),
- NUMBER=_get_number_flag(),
- )
-
-
-def get_optionflags(parent):
- optionflags_str = parent.config.getini("doctest_optionflags")
- flag_lookup_table = _get_flag_lookup()
- flag_acc = 0
- for flag in optionflags_str:
- flag_acc |= flag_lookup_table[flag]
- return flag_acc
-
-
-def _get_continue_on_failure(config):
- continue_on_failure = config.getvalue("doctest_continue_on_failure")
- if continue_on_failure:
-        # We need to turn this off if we use pdb since we should stop at
- # the first failure.
- if config.getvalue("usepdb"):
- continue_on_failure = False
- return continue_on_failure
-
-
-class DoctestTextfile(pytest.Module):
- obj = None
-
- def collect(self) -> Iterable[DoctestItem]:
- import doctest
-
- # Inspired by doctest.testfile; ideally we would use it directly,
- # but it doesn't support passing a custom checker.
- encoding = self.config.getini("doctest_encoding")
- text = self.path.read_text(encoding)
- filename = str(self.path)
- name = self.path.name
- globs = {"__name__": "__main__"}
-
- optionflags = get_optionflags(self)
-
- runner = _get_runner(
- verbose=False,
- optionflags=optionflags,
- checker=_get_checker(),
- continue_on_failure=_get_continue_on_failure(self.config),
- )
-
- parser = doctest.DocTestParser()
- test = parser.get_doctest(text, globs, name, filename, 0)
- if test.examples:
- yield DoctestItem.from_parent(
- self, name=test.name, runner=runner, dtest=test
- )
-
-
-def _check_all_skipped(test: "doctest.DocTest") -> None:
- """Raise pytest.skip() if all examples in the given DocTest have the SKIP
- option set."""
- import doctest
-
- all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
- if all_skipped:
- pytest.skip("all tests skipped by +SKIP option")
-
-
-def _is_mocked(obj: object) -> bool:
- """Return if an object is possibly a mock object by checking the
- existence of a highly improbable attribute."""
- return (
- safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
- is not None
- )
-
-
-@contextmanager
-def _patch_unwrap_mock_aware() -> Generator[None, None, None]:
- """Context manager which replaces ``inspect.unwrap`` with a version
- that's aware of mock objects and doesn't recurse into them."""
- real_unwrap = inspect.unwrap
-
- def _mock_aware_unwrap(
- func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None
- ) -> Any:
- try:
- if stop is None or stop is _is_mocked:
- return real_unwrap(func, stop=_is_mocked)
- _stop = stop
-            return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(obj))
- except Exception as e:
- warnings.warn(
- "Got %r when unwrapping %r. This is usually caused "
- "by a violation of Python's object protocol; see e.g. "
- "https://github.com/pytest-dev/pytest/issues/5080" % (e, func),
- PytestWarning,
- )
- raise
-
- inspect.unwrap = _mock_aware_unwrap
- try:
- yield
- finally:
- inspect.unwrap = real_unwrap
-
-
-class DoctestModule(pytest.Module):
- def collect(self) -> Iterable[DoctestItem]:
- import doctest
-
- class MockAwareDocTestFinder(doctest.DocTestFinder):
- """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
-
- https://github.com/pytest-dev/pytest/issues/3456
- https://bugs.python.org/issue25532
- """
-
- def _find_lineno(self, obj, source_lines):
-            """Doctest code does not take into account `@property`; this
-            is a hackish way to fix it. https://bugs.python.org/issue17446
-
- Wrapped Doctests will need to be unwrapped so the correct
- line number is returned. This will be reported upstream. #8796
- """
- if isinstance(obj, property):
- obj = getattr(obj, "fget", obj)
-
- if hasattr(obj, "__wrapped__"):
- # Get the main obj in case of it being wrapped
- obj = inspect.unwrap(obj)
-
- # Type ignored because this is a private function.
- return super()._find_lineno( # type:ignore[misc]
- obj,
- source_lines,
- )
-
- def _find(
- self, tests, obj, name, module, source_lines, globs, seen
- ) -> None:
- if _is_mocked(obj):
- return
- with _patch_unwrap_mock_aware():
-
- # Type ignored because this is a private function.
- super()._find( # type:ignore[misc]
- tests, obj, name, module, source_lines, globs, seen
- )
-
- if self.path.name == "conftest.py":
- module = self.config.pluginmanager._importconftest(
- self.path,
- self.config.getoption("importmode"),
- rootpath=self.config.rootpath,
- )
- else:
- try:
- module = import_path(self.path, root=self.config.rootpath)
- except ImportError:
- if self.config.getvalue("doctest_ignore_import_errors"):
- pytest.skip("unable to import module %r" % self.path)
- else:
- raise
- # Uses internal doctest module parsing mechanism.
- finder = MockAwareDocTestFinder()
- optionflags = get_optionflags(self)
- runner = _get_runner(
- verbose=False,
- optionflags=optionflags,
- checker=_get_checker(),
- continue_on_failure=_get_continue_on_failure(self.config),
- )
-
- for test in finder.find(module, module.__name__):
- if test.examples: # skip empty doctests
- yield DoctestItem.from_parent(
- self, name=test.name, runner=runner, dtest=test
- )
-
-
-def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
- """Used by DoctestTextfile and DoctestItem to setup fixture information."""
-
- def func() -> None:
- pass
-
- doctest_item.funcargs = {} # type: ignore[attr-defined]
- fm = doctest_item.session._fixturemanager
- doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined]
- node=doctest_item, func=func, cls=None, funcargs=False
- )
- fixture_request = FixtureRequest(doctest_item, _ispytest=True)
- fixture_request._fillfixtures()
- return fixture_request
-
-
-def _init_checker_class() -> Type["doctest.OutputChecker"]:
- import doctest
- import re
-
- class LiteralsOutputChecker(doctest.OutputChecker):
- # Based on doctest_nose_plugin.py from the nltk project
- # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
- # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
-
- _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
- _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
- _number_re = re.compile(
- r"""
- (?P<number>
- (?P<mantissa>
- (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
- |
- (?P<integer2> [+-]?\d+)\.
- )
- (?:
- [Ee]
- (?P<exponent1> [+-]?\d+)
- )?
- |
- (?P<integer3> [+-]?\d+)
- (?:
- [Ee]
- (?P<exponent2> [+-]?\d+)
- )
- )
- """,
- re.VERBOSE,
- )
-
- def check_output(self, want: str, got: str, optionflags: int) -> bool:
- if super().check_output(want, got, optionflags):
- return True
-
- allow_unicode = optionflags & _get_allow_unicode_flag()
- allow_bytes = optionflags & _get_allow_bytes_flag()
- allow_number = optionflags & _get_number_flag()
-
- if not allow_unicode and not allow_bytes and not allow_number:
- return False
-
- def remove_prefixes(regex: Pattern[str], txt: str) -> str:
- return re.sub(regex, r"\1\2", txt)
-
- if allow_unicode:
- want = remove_prefixes(self._unicode_literal_re, want)
- got = remove_prefixes(self._unicode_literal_re, got)
-
- if allow_bytes:
- want = remove_prefixes(self._bytes_literal_re, want)
- got = remove_prefixes(self._bytes_literal_re, got)
-
- if allow_number:
- got = self._remove_unwanted_precision(want, got)
-
- return super().check_output(want, got, optionflags)
-
- def _remove_unwanted_precision(self, want: str, got: str) -> str:
- wants = list(self._number_re.finditer(want))
- gots = list(self._number_re.finditer(got))
- if len(wants) != len(gots):
- return got
- offset = 0
- for w, g in zip(wants, gots):
- fraction: Optional[str] = w.group("fraction")
- exponent: Optional[str] = w.group("exponent1")
- if exponent is None:
- exponent = w.group("exponent2")
- precision = 0 if fraction is None else len(fraction)
- if exponent is not None:
- precision -= int(exponent)
- if float(w.group()) == approx(float(g.group()), abs=10**-precision):
- # They're close enough. Replace the text we actually
- # got with the text we want, so that it will match when we
- # check the string literally.
- got = (
- got[: g.start() + offset] + w.group() + got[g.end() + offset :]
- )
- offset += w.end() - w.start() - (g.end() - g.start())
- return got
-
- return LiteralsOutputChecker
-
-
-def _get_checker() -> "doctest.OutputChecker":
- """Return a doctest.OutputChecker subclass that supports some
- additional options:
-
- * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
- prefixes (respectively) in string literals. Useful when the same
- doctest should run in Python 2 and Python 3.
-
- * NUMBER to ignore floating-point differences smaller than the
- precision of the literal number in the doctest.
-
- An inner class is used to avoid importing "doctest" at the module
- level.
- """
- global CHECKER_CLASS
- if CHECKER_CLASS is None:
- CHECKER_CLASS = _init_checker_class()
- return CHECKER_CLASS()
-
-
-def _get_allow_unicode_flag() -> int:
- """Register and return the ALLOW_UNICODE flag."""
- import doctest
-
- return doctest.register_optionflag("ALLOW_UNICODE")
-
-
-def _get_allow_bytes_flag() -> int:
- """Register and return the ALLOW_BYTES flag."""
- import doctest
-
- return doctest.register_optionflag("ALLOW_BYTES")
-
-
-def _get_number_flag() -> int:
- """Register and return the NUMBER flag."""
- import doctest
-
- return doctest.register_optionflag("NUMBER")
-
-
-def _get_report_choice(key: str) -> int:
- """Return the actual `doctest` module flag value.
-
- We want to do it as late as possible to avoid importing `doctest` and all
- its dependencies when parsing options, as it adds overhead and breaks tests.
- """
- import doctest
-
- return {
- DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
- DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
- DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
- DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
- DOCTEST_REPORT_CHOICE_NONE: 0,
- }[key]
-
-
-@pytest.fixture(scope="session")
-def doctest_namespace() -> Dict[str, Any]:
- """Fixture that returns a :py:class:`dict` that will be injected into the
- namespace of doctests."""
- return dict()
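
A hedged sketch of how the `doctest_namespace` fixture and the NUMBER option
deleted above are typically used; the conftest.py/mymodule.py names are
illustrative and assume the suite is run with `--doctest-modules`:

    # conftest.py (illustrative)
    import math

    import pytest

    @pytest.fixture(autouse=True)
    def add_math(doctest_namespace):
        # Anything placed in doctest_namespace becomes a global inside
        # collected doctests (injected by DoctestItem.setup() above).
        doctest_namespace["math"] = math

    # mymodule.py (illustrative)
    def double(x):
        """Double a number.

        >>> double(math.pi) == 2 * math.pi   # 'math' is injected by the fixture
        True
        >>> math.pi                          # doctest: +NUMBER
        3.14
        """
        return 2 * x
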
diff --git a/contrib/python/pytest/py3/_pytest/faulthandler.py b/contrib/python/pytest/py3/_pytest/faulthandler.py
deleted file mode 100644
index aaee307ff2..0000000000
--- a/contrib/python/pytest/py3/_pytest/faulthandler.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import io
-import os
-import sys
-from typing import Generator
-from typing import TextIO
-
-import pytest
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.nodes import Item
-from _pytest.stash import StashKey
-
-
-fault_handler_stderr_key = StashKey[TextIO]()
-fault_handler_originally_enabled_key = StashKey[bool]()
-
-
-def pytest_addoption(parser: Parser) -> None:
- help = (
- "Dump the traceback of all threads if a test takes "
- "more than TIMEOUT seconds to finish."
- )
- parser.addini("faulthandler_timeout", help, default=0.0)
-
-
-def pytest_configure(config: Config) -> None:
- import faulthandler
-
- stderr_fd_copy = os.dup(get_stderr_fileno())
- config.stash[fault_handler_stderr_key] = open(stderr_fd_copy, "w")
- config.stash[fault_handler_originally_enabled_key] = faulthandler.is_enabled()
- faulthandler.enable(file=config.stash[fault_handler_stderr_key])
-
-
-def pytest_unconfigure(config: Config) -> None:
- import faulthandler
-
- faulthandler.disable()
- # Close the dup file installed during pytest_configure.
- if fault_handler_stderr_key in config.stash:
- config.stash[fault_handler_stderr_key].close()
- del config.stash[fault_handler_stderr_key]
- if config.stash.get(fault_handler_originally_enabled_key, False):
- # Re-enable the faulthandler if it was originally enabled.
- faulthandler.enable(file=get_stderr_fileno())
-
-
-def get_stderr_fileno() -> int:
- try:
- fileno = sys.stderr.fileno()
- # The Twisted Logger will return an invalid file descriptor since it is not backed
- # by an FD. So, let's also forward this to the same code path as with pytest-xdist.
- if fileno == -1:
- raise AttributeError()
- return fileno
- except (AttributeError, io.UnsupportedOperation):
- # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
- # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
- # This is potentially dangerous, but the best we can do.
- return sys.__stderr__.fileno()
-
-
-def get_timeout_config_value(config: Config) -> float:
- return float(config.getini("faulthandler_timeout") or 0.0)
-
-
-@pytest.hookimpl(hookwrapper=True, trylast=True)
-def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
- timeout = get_timeout_config_value(item.config)
- stderr = item.config.stash[fault_handler_stderr_key]
- if timeout > 0 and stderr is not None:
- import faulthandler
-
- faulthandler.dump_traceback_later(timeout, file=stderr)
- try:
- yield
- finally:
- faulthandler.cancel_dump_traceback_later()
- else:
- yield
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_enter_pdb() -> None:
- """Cancel any traceback dumping due to timeout before entering pdb."""
- import faulthandler
-
- faulthandler.cancel_dump_traceback_later()
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_exception_interact() -> None:
- """Cancel any traceback dumping due to an interactive exception being
- raised."""
- import faulthandler
-
- faulthandler.cancel_dump_traceback_later()
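
The hookwrapper above pairs two stdlib faulthandler calls; a minimal standalone
sketch of the same arm/cancel timeout mechanism (the 2-second timeout and the
sleep stand in for a real test run):

    import faulthandler
    import sys
    import time

    # Arm a traceback dump that fires only if the "test" takes too long,
    # then cancel it once the test finishes -- mirroring pytest_runtest_protocol.
    faulthandler.dump_traceback_later(2.0, file=sys.stderr)
    try:
        time.sleep(0.1)   # stand-in for running one test item
    finally:
        faulthandler.cancel_dump_traceback_later()
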
diff --git a/contrib/python/pytest/py3/_pytest/fixtures.py b/contrib/python/pytest/py3/_pytest/fixtures.py
deleted file mode 100644
index ee3e93f190..0000000000
--- a/contrib/python/pytest/py3/_pytest/fixtures.py
+++ /dev/null
@@ -1,1655 +0,0 @@
-import functools
-import inspect
-import os
-import sys
-import warnings
-from collections import defaultdict
-from collections import deque
-from contextlib import suppress
-from pathlib import Path
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Dict
-from typing import Generator
-from typing import Generic
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import MutableMapping
-from typing import Optional
-from typing import overload
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import attr
-
-import _pytest
-from _pytest import nodes
-from _pytest._code import getfslineno
-from _pytest._code.code import FormattedExcinfo
-from _pytest._code.code import TerminalRepr
-from _pytest._io import TerminalWriter
-from _pytest.compat import _format_args
-from _pytest.compat import _PytestWrapper
-from _pytest.compat import assert_never
-from _pytest.compat import final
-from _pytest.compat import get_real_func
-from _pytest.compat import get_real_method
-from _pytest.compat import getfuncargnames
-from _pytest.compat import getimfunc
-from _pytest.compat import getlocation
-from _pytest.compat import is_generator
-from _pytest.compat import NOTSET
-from _pytest.compat import safe_getattr
-from _pytest.config import _PluggyPlugin
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.deprecated import YIELD_FIXTURE
-from _pytest.mark import Mark
-from _pytest.mark import ParameterSet
-from _pytest.mark.structures import MarkDecorator
-from _pytest.outcomes import fail
-from _pytest.outcomes import TEST_OUTCOME
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import bestrelpath
-from _pytest.scope import HIGH_SCOPES
-from _pytest.scope import Scope
-from _pytest.stash import StashKey
-
-
-if TYPE_CHECKING:
- from typing import Deque
- from typing import NoReturn
-
- from _pytest.scope import _ScopeName
- from _pytest.main import Session
- from _pytest.python import CallSpec2
- from _pytest.python import Metafunc
-
-
-# The value of the fixture -- return/yield of the fixture function (type variable).
-FixtureValue = TypeVar("FixtureValue")
-# The type of the fixture function (type variable).
-FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object])
-# The type of a fixture function (type alias generic in fixture value).
-_FixtureFunc = Union[
- Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]]
-]
-# The type of FixtureDef.cached_result (type alias generic in fixture value).
-_FixtureCachedResult = Union[
- Tuple[
- # The result.
- FixtureValue,
- # Cache key.
- object,
- None,
- ],
- Tuple[
- None,
- # Cache key.
- object,
- # Exc info if raised.
- Tuple[Type[BaseException], BaseException, TracebackType],
- ],
-]
-
-
-@attr.s(frozen=True, auto_attribs=True)
-class PseudoFixtureDef(Generic[FixtureValue]):
- cached_result: "_FixtureCachedResult[FixtureValue]"
- _scope: Scope
-
-
-def pytest_sessionstart(session: "Session") -> None:
- session._fixturemanager = FixtureManager(session)
-
-
-def get_scope_package(node, fixturedef: "FixtureDef[object]"):
- import pytest
-
- cls = pytest.Package
- current = node
- fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py")
- while current and (
- type(current) is not cls or fixture_package_name != current.nodeid
- ):
- current = current.parent
- if current is None:
- return node.session
- return current
-
-
-def get_scope_node(
- node: nodes.Node, scope: Scope
-) -> Optional[Union[nodes.Item, nodes.Collector]]:
- import _pytest.python
-
- if scope is Scope.Function:
- return node.getparent(nodes.Item)
- elif scope is Scope.Class:
- return node.getparent(_pytest.python.Class)
- elif scope is Scope.Module:
- return node.getparent(_pytest.python.Module)
- elif scope is Scope.Package:
- return node.getparent(_pytest.python.Package)
- elif scope is Scope.Session:
- return node.getparent(_pytest.main.Session)
- else:
- assert_never(scope)
-
-
-# Used for storing artificial fixturedefs for direct parametrization.
-name2pseudofixturedef_key = StashKey[Dict[str, "FixtureDef[Any]"]]()
-
-
-def add_funcarg_pseudo_fixture_def(
- collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
-) -> None:
- # This function will transform all collected calls to functions
- # if they use direct funcargs (i.e. direct parametrization)
- # because we want later test execution to be able to rely on
- # an existing FixtureDef structure for all arguments.
- # XXX we can probably avoid this algorithm if we modify CallSpec2
- # to directly care for creating the fixturedefs within its methods.
- if not metafunc._calls[0].funcargs:
- # This function call does not have direct parametrization.
- return
- # Collect funcargs of all callspecs into a list of values.
- arg2params: Dict[str, List[object]] = {}
- arg2scope: Dict[str, Scope] = {}
- for callspec in metafunc._calls:
- for argname, argvalue in callspec.funcargs.items():
- assert argname not in callspec.params
- callspec.params[argname] = argvalue
- arg2params_list = arg2params.setdefault(argname, [])
- callspec.indices[argname] = len(arg2params_list)
- arg2params_list.append(argvalue)
- if argname not in arg2scope:
- scope = callspec._arg2scope.get(argname, Scope.Function)
- arg2scope[argname] = scope
- callspec.funcargs.clear()
-
- # Register artificial FixtureDef's so that later at test execution
- # time we can rely on a proper FixtureDef to exist for fixture setup.
- arg2fixturedefs = metafunc._arg2fixturedefs
- for argname, valuelist in arg2params.items():
- # If we have a scope that is higher than function, we need
- # to make sure we only ever create an according fixturedef on
- # a per-scope basis. We thus store and cache the fixturedef on the
- # node related to the scope.
- scope = arg2scope[argname]
- node = None
- if scope is not Scope.Function:
- node = get_scope_node(collector, scope)
- if node is None:
- assert scope is Scope.Class and isinstance(
- collector, _pytest.python.Module
- )
- # Use module-level collector for class-scope (for now).
- node = collector
- if node is None:
- name2pseudofixturedef = None
- else:
- default: Dict[str, FixtureDef[Any]] = {}
- name2pseudofixturedef = node.stash.setdefault(
- name2pseudofixturedef_key, default
- )
- if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
- arg2fixturedefs[argname] = [name2pseudofixturedef[argname]]
- else:
- fixturedef = FixtureDef(
- fixturemanager=fixturemanager,
- baseid="",
- argname=argname,
- func=get_direct_param_fixture_func,
- scope=arg2scope[argname],
- params=valuelist,
- unittest=False,
- ids=None,
- )
- arg2fixturedefs[argname] = [fixturedef]
- if name2pseudofixturedef is not None:
- name2pseudofixturedef[argname] = fixturedef
-
-
-def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
- """Return fixturemarker or None if it doesn't exist or raised
- exceptions."""
- try:
- fixturemarker: Optional[FixtureFunctionMarker] = getattr(
- obj, "_pytestfixturefunction", None
- )
- except TEST_OUTCOME:
- # some objects raise errors like request (from flask import request)
- # we don't expect them to be fixture functions
- return None
- return fixturemarker
-
-
-# Parametrized fixture key, helper alias for code below.
-_Key = Tuple[object, ...]
-
-
-def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_Key]:
- """Return list of keys for all parametrized arguments which match
- the specified scope."""
- assert scope is not Scope.Function
- try:
- callspec = item.callspec # type: ignore[attr-defined]
- except AttributeError:
- pass
- else:
- cs: CallSpec2 = callspec
-        # cs.indices.items() yields argnames in arbitrary order. Sort them
-        # so that different calls to get_parametrized_fixture_keys
-        # are deterministic.
- for argname, param_index in sorted(cs.indices.items()):
- if cs._arg2scope[argname] != scope:
- continue
- if scope is Scope.Session:
- key: _Key = (argname, param_index)
- elif scope is Scope.Package:
- key = (argname, param_index, item.path.parent)
- elif scope is Scope.Module:
- key = (argname, param_index, item.path)
- elif scope is Scope.Class:
- item_cls = item.cls # type: ignore[attr-defined]
- key = (argname, param_index, item.path, item_cls)
- else:
- assert_never(scope)
- yield key
-
-
-# Algorithm for sorting on a per-parametrized resource setup basis.
-# It is called for Session scope first and performs sorting
-# down to the lower scopes such as to minimize number of "high scope"
-# setups and teardowns.
-
-
-def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]:
- argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]] = {}
- items_by_argkey: Dict[Scope, Dict[_Key, Deque[nodes.Item]]] = {}
- for scope in HIGH_SCOPES:
- d: Dict[nodes.Item, Dict[_Key, None]] = {}
- argkeys_cache[scope] = d
- item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque)
- items_by_argkey[scope] = item_d
- for item in items:
- keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None)
- if keys:
- d[item] = keys
- for key in keys:
- item_d[key].append(item)
- items_dict = dict.fromkeys(items, None)
- return list(
- reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session)
- )
-
-
-def fix_cache_order(
- item: nodes.Item,
- argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]],
- items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]],
-) -> None:
- for scope in HIGH_SCOPES:
- for key in argkeys_cache[scope].get(item, []):
- items_by_argkey[scope][key].appendleft(item)
-
-
-def reorder_items_atscope(
- items: Dict[nodes.Item, None],
- argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]],
- items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]],
- scope: Scope,
-) -> Dict[nodes.Item, None]:
- if scope is Scope.Function or len(items) < 3:
- return items
- ignore: Set[Optional[_Key]] = set()
- items_deque = deque(items)
- items_done: Dict[nodes.Item, None] = {}
- scoped_items_by_argkey = items_by_argkey[scope]
- scoped_argkeys_cache = argkeys_cache[scope]
- while items_deque:
- no_argkey_group: Dict[nodes.Item, None] = {}
- slicing_argkey = None
- while items_deque:
- item = items_deque.popleft()
- if item in items_done or item in no_argkey_group:
- continue
- argkeys = dict.fromkeys(
- (k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None
- )
- if not argkeys:
- no_argkey_group[item] = None
- else:
- slicing_argkey, _ = argkeys.popitem()
- # We don't have to remove relevant items from later in the
- # deque because they'll just be ignored.
- matching_items = [
- i for i in scoped_items_by_argkey[slicing_argkey] if i in items
- ]
- for i in reversed(matching_items):
- fix_cache_order(i, argkeys_cache, items_by_argkey)
- items_deque.appendleft(i)
- break
- if no_argkey_group:
- no_argkey_group = reorder_items_atscope(
- no_argkey_group, argkeys_cache, items_by_argkey, scope.next_lower()
- )
- for item in no_argkey_group:
- items_done[item] = None
- ignore.add(slicing_argkey)
- return items_done
-
-
-def get_direct_param_fixture_func(request):
- return request.param
-
-
-@attr.s(slots=True, auto_attribs=True)
-class FuncFixtureInfo:
- # Original function argument names.
- argnames: Tuple[str, ...]
- # Argnames that function immediately requires. These include argnames +
- # fixture names specified via usefixtures and via autouse=True in fixture
- # definitions.
- initialnames: Tuple[str, ...]
- names_closure: List[str]
- name2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]]
-
- def prune_dependency_tree(self) -> None:
- """Recompute names_closure from initialnames and name2fixturedefs.
-
- Can only reduce names_closure, which means that the new closure will
- always be a subset of the old one. The order is preserved.
-
- This method is needed because direct parametrization may shadow some
- of the fixtures that were included in the originally built dependency
- tree. In this way the dependency tree can get pruned, and the closure
- of argnames may get reduced.
- """
- closure: Set[str] = set()
- working_set = set(self.initialnames)
- while working_set:
- argname = working_set.pop()
-            # Argname may be something not included in the original names_closure,
- # in which case we ignore it. This currently happens with pseudo
- # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
- # So they introduce the new dependency 'request' which might have
- # been missing in the original tree (closure).
- if argname not in closure and argname in self.names_closure:
- closure.add(argname)
- if argname in self.name2fixturedefs:
- working_set.update(self.name2fixturedefs[argname][-1].argnames)
-
- self.names_closure[:] = sorted(closure, key=self.names_closure.index)
-
-
-class FixtureRequest:
- """A request for a fixture from a test or fixture function.
-
- A request object gives access to the requesting test context and has
- an optional ``param`` attribute in case the fixture is parametrized
- indirectly.
- """
-
- def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None:
- check_ispytest(_ispytest)
- self._pyfuncitem = pyfuncitem
- #: Fixture for which this request is being performed.
- self.fixturename: Optional[str] = None
- self._scope = Scope.Function
- self._fixture_defs: Dict[str, FixtureDef[Any]] = {}
- fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo
- self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
- self._arg2index: Dict[str, int] = {}
- self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager
-
- @property
- def scope(self) -> "_ScopeName":
- """Scope string, one of "function", "class", "module", "package", "session"."""
- return self._scope.value
-
- @property
- def fixturenames(self) -> List[str]:
- """Names of all active fixtures in this request."""
- result = list(self._pyfuncitem._fixtureinfo.names_closure)
- result.extend(set(self._fixture_defs).difference(result))
- return result
-
- @property
- def node(self):
- """Underlying collection node (depends on current request scope)."""
- return self._getscopeitem(self._scope)
-
- def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]":
- fixturedefs = self._arg2fixturedefs.get(argname, None)
- if fixturedefs is None:
-            # We arrive here because of a dynamic call to
-            # getfixturevalue(argname), which was naturally not known
-            # at parsing/collection time.
- assert self._pyfuncitem.parent is not None
- parentid = self._pyfuncitem.parent.nodeid
- fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
- # TODO: Fix this type ignore. Either add assert or adjust types.
- # Can this be None here?
- self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment]
- # fixturedefs list is immutable so we maintain a decreasing index.
- index = self._arg2index.get(argname, 0) - 1
- if fixturedefs is None or (-index > len(fixturedefs)):
- raise FixtureLookupError(argname, self)
- self._arg2index[argname] = index
- return fixturedefs[index]
-
- @property
- def config(self) -> Config:
- """The pytest config object associated with this request."""
- return self._pyfuncitem.config # type: ignore[no-any-return]
-
- @property
- def function(self):
- """Test function object if the request has a per-function scope."""
- if self.scope != "function":
- raise AttributeError(
- f"function not available in {self.scope}-scoped context"
- )
- return self._pyfuncitem.obj
-
- @property
- def cls(self):
- """Class (can be None) where the test function was collected."""
- if self.scope not in ("class", "function"):
- raise AttributeError(f"cls not available in {self.scope}-scoped context")
- clscol = self._pyfuncitem.getparent(_pytest.python.Class)
- if clscol:
- return clscol.obj
-
- @property
- def instance(self):
- """Instance (can be None) on which test function was collected."""
- # unittest support hack, see _pytest.unittest.TestCaseFunction.
- try:
- return self._pyfuncitem._testcase
- except AttributeError:
- function = getattr(self, "function", None)
- return getattr(function, "__self__", None)
-
- @property
- def module(self):
- """Python module object where the test function was collected."""
- if self.scope not in ("function", "class", "module"):
- raise AttributeError(f"module not available in {self.scope}-scoped context")
- return self._pyfuncitem.getparent(_pytest.python.Module).obj
-
- @property
- def path(self) -> Path:
- if self.scope not in ("function", "class", "module", "package"):
- raise AttributeError(f"path not available in {self.scope}-scoped context")
- # TODO: Remove ignore once _pyfuncitem is properly typed.
- return self._pyfuncitem.path # type: ignore
-
- @property
- def keywords(self) -> MutableMapping[str, Any]:
- """Keywords/markers dictionary for the underlying node."""
- node: nodes.Node = self.node
- return node.keywords
-
- @property
- def session(self) -> "Session":
- """Pytest session object."""
- return self._pyfuncitem.session # type: ignore[no-any-return]
-
- def addfinalizer(self, finalizer: Callable[[], object]) -> None:
- """Add finalizer/teardown function to be called after the last test
- within the requesting test context finished execution."""
- # XXX usually this method is shadowed by fixturedef specific ones.
- self._addfinalizer(finalizer, scope=self.scope)
-
- def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None:
- node = self._getscopeitem(scope)
- node.addfinalizer(finalizer)
-
- def applymarker(self, marker: Union[str, MarkDecorator]) -> None:
- """Apply a marker to a single test function invocation.
-
- This method is useful if you don't want to have a keyword/marker
- on all function invocations.
-
- :param marker:
- A :class:`pytest.MarkDecorator` object created by a call
- to ``pytest.mark.NAME(...)``.
- """
- self.node.add_marker(marker)
-
- def raiseerror(self, msg: Optional[str]) -> "NoReturn":
- """Raise a FixtureLookupError with the given message."""
- raise self._fixturemanager.FixtureLookupError(None, self, msg)
-
- def _fillfixtures(self) -> None:
- item = self._pyfuncitem
- fixturenames = getattr(item, "fixturenames", self.fixturenames)
- for argname in fixturenames:
- if argname not in item.funcargs:
- item.funcargs[argname] = self.getfixturevalue(argname)
-
- def getfixturevalue(self, argname: str) -> Any:
- """Dynamically run a named fixture function.
-
- Declaring fixtures via function argument is recommended where possible.
- But if you can only decide whether to use another fixture at test
- setup time, you may use this function to retrieve it inside a fixture
- or test function body.
-
- :raises pytest.FixtureLookupError:
- If the given fixture could not be found.
- """
- fixturedef = self._get_active_fixturedef(argname)
- assert fixturedef.cached_result is not None
- return fixturedef.cached_result[0]
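
An illustrative example of the dynamic lookup described in the getfixturevalue
docstring above; the fixture names and the environment variable are made up:

    import os

    import pytest

    @pytest.fixture
    def fast_db():
        return {"backend": "sqlite"}     # illustrative stand-in

    @pytest.fixture
    def real_db():
        return {"backend": "postgres"}   # illustrative stand-in

    @pytest.fixture
    def db(request):
        # The backend is only known at setup time, so it is resolved
        # dynamically instead of being declared as a function argument.
        name = "real_db" if os.environ.get("USE_REAL_DB") else "fast_db"
        return request.getfixturevalue(name)

    def test_db_has_backend(db):
        assert "backend" in db
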
-
- def _get_active_fixturedef(
- self, argname: str
- ) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]:
- try:
- return self._fixture_defs[argname]
- except KeyError:
- try:
- fixturedef = self._getnextfixturedef(argname)
- except FixtureLookupError:
- if argname == "request":
- cached_result = (self, [0], None)
- return PseudoFixtureDef(cached_result, Scope.Function)
- raise
- # Remove indent to prevent the python3 exception
- # from leaking into the call.
- self._compute_fixture_value(fixturedef)
- self._fixture_defs[argname] = fixturedef
- return fixturedef
-
- def _get_fixturestack(self) -> List["FixtureDef[Any]"]:
- current = self
- values: List[FixtureDef[Any]] = []
- while isinstance(current, SubRequest):
- values.append(current._fixturedef) # type: ignore[has-type]
- current = current._parent_request
- values.reverse()
- return values
-
- def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None:
- """Create a SubRequest based on "self" and call the execute method
- of the given FixtureDef object.
-
- This will force the FixtureDef object to throw away any previous
- results and compute a new fixture value, which will be stored into
- the FixtureDef object itself.
- """
- # prepare a subrequest object before calling fixture function
- # (latter managed by fixturedef)
- argname = fixturedef.argname
- funcitem = self._pyfuncitem
- scope = fixturedef._scope
- try:
- callspec = funcitem.callspec
- except AttributeError:
- callspec = None
- if callspec is not None and argname in callspec.params:
- param = callspec.params[argname]
- param_index = callspec.indices[argname]
- # If a parametrize invocation set a scope it will override
- # the static scope defined with the fixture function.
- with suppress(KeyError):
- scope = callspec._arg2scope[argname]
- else:
- param = NOTSET
- param_index = 0
- has_params = fixturedef.params is not None
- fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
- if has_params and fixtures_not_supported:
- msg = (
- "{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
- "Node id: {nodeid}\n"
- "Function type: {typename}"
- ).format(
- name=funcitem.name,
- nodeid=funcitem.nodeid,
- typename=type(funcitem).__name__,
- )
- fail(msg, pytrace=False)
- if has_params:
- frame = inspect.stack()[3]
- frameinfo = inspect.getframeinfo(frame[0])
- source_path = absolutepath(frameinfo.filename)
- source_lineno = frameinfo.lineno
- try:
- source_path_str = str(
- source_path.relative_to(funcitem.config.rootpath)
- )
- except ValueError:
- source_path_str = str(source_path)
- msg = (
- "The requested fixture has no parameter defined for test:\n"
- " {}\n\n"
- "Requested fixture '{}' defined in:\n{}"
- "\n\nRequested here:\n{}:{}".format(
- funcitem.nodeid,
- fixturedef.argname,
- getlocation(fixturedef.func, funcitem.config.rootpath),
- source_path_str,
- source_lineno,
- )
- )
- fail(msg, pytrace=False)
-
- subrequest = SubRequest(
- self, scope, param, param_index, fixturedef, _ispytest=True
- )
-
- # Check if a higher-level scoped fixture accesses a lower level one.
- subrequest._check_scope(argname, self._scope, scope)
- try:
- # Call the fixture function.
- fixturedef.execute(request=subrequest)
- finally:
- self._schedule_finalizers(fixturedef, subrequest)
-
- def _schedule_finalizers(
- self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
- ) -> None:
- # If fixture function failed it might have registered finalizers.
- subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest))
-
- def _check_scope(
- self,
- argname: str,
- invoking_scope: Scope,
- requested_scope: Scope,
- ) -> None:
- if argname == "request":
- return
- if invoking_scope > requested_scope:
- # Try to report something helpful.
- text = "\n".join(self._factorytraceback())
- fail(
- f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
- f"fixture {argname} with a {invoking_scope.value} scoped request object, "
- f"involved factories:\n{text}",
- pytrace=False,
- )
-
- def _factorytraceback(self) -> List[str]:
- lines = []
- for fixturedef in self._get_fixturestack():
- factory = fixturedef.func
- fs, lineno = getfslineno(factory)
- if isinstance(fs, Path):
- session: Session = self._pyfuncitem.session
- p = bestrelpath(session.path, fs)
- else:
- p = fs
- args = _format_args(factory)
- lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
- return lines
-
- def _getscopeitem(
- self, scope: Union[Scope, "_ScopeName"]
- ) -> Union[nodes.Item, nodes.Collector]:
- if isinstance(scope, str):
- scope = Scope(scope)
- if scope is Scope.Function:
- # This might also be a non-function Item despite its attribute name.
- node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem
- elif scope is Scope.Package:
- # FIXME: _fixturedef is not defined on FixtureRequest (this class),
-            # but on SubRequest (a subclass).
- node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined]
- else:
- node = get_scope_node(self._pyfuncitem, scope)
- if node is None and scope is Scope.Class:
- # Fallback to function item itself.
- node = self._pyfuncitem
- assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
- scope, self._pyfuncitem
- )
- return node
-
- def __repr__(self) -> str:
- return "<FixtureRequest for %r>" % (self.node)
-
-
-@final
-class SubRequest(FixtureRequest):
- """A sub request for handling getting a fixture from a test function/fixture."""
-
- def __init__(
- self,
- request: "FixtureRequest",
- scope: Scope,
- param: Any,
- param_index: int,
- fixturedef: "FixtureDef[object]",
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self._parent_request = request
- self.fixturename = fixturedef.argname
- if param is not NOTSET:
- self.param = param
- self.param_index = param_index
- self._scope = scope
- self._fixturedef = fixturedef
- self._pyfuncitem = request._pyfuncitem
- self._fixture_defs = request._fixture_defs
- self._arg2fixturedefs = request._arg2fixturedefs
- self._arg2index = request._arg2index
- self._fixturemanager = request._fixturemanager
-
- def __repr__(self) -> str:
- return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
-
- def addfinalizer(self, finalizer: Callable[[], object]) -> None:
- """Add finalizer/teardown function to be called after the last test
- within the requesting test context finished execution."""
- self._fixturedef.addfinalizer(finalizer)
-
- def _schedule_finalizers(
- self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
- ) -> None:
- # If the executing fixturedef was not explicitly requested in the argument list (via
- # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
- # first.
- if fixturedef.argname not in self.fixturenames:
- fixturedef.addfinalizer(
- functools.partial(self._fixturedef.finish, request=self)
- )
- super()._schedule_finalizers(fixturedef, subrequest)
-
-
-@final
-class FixtureLookupError(LookupError):
- """Could not return a requested fixture (missing or invalid)."""
-
- def __init__(
- self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
- ) -> None:
- self.argname = argname
- self.request = request
- self.fixturestack = request._get_fixturestack()
- self.msg = msg
-
- def formatrepr(self) -> "FixtureLookupErrorRepr":
- tblines: List[str] = []
- addline = tblines.append
- stack = [self.request._pyfuncitem.obj]
- stack.extend(map(lambda x: x.func, self.fixturestack))
- msg = self.msg
- if msg is not None:
-            # The last fixture raised an error; present
-            # it at the requesting side.
- stack = stack[:-1]
- for function in stack:
- fspath, lineno = getfslineno(function)
- try:
- lines, _ = inspect.getsourcelines(get_real_func(function))
- except (OSError, IndexError, TypeError):
- error_msg = "file %s, line %s: source code not available"
- addline(error_msg % (fspath, lineno + 1))
- else:
- addline(f"file {fspath}, line {lineno + 1}")
- for i, line in enumerate(lines):
- line = line.rstrip()
- addline(" " + line)
- if line.lstrip().startswith("def"):
- break
-
- if msg is None:
- fm = self.request._fixturemanager
- available = set()
- parentid = self.request._pyfuncitem.parent.nodeid
- for name, fixturedefs in fm._arg2fixturedefs.items():
- faclist = list(fm._matchfactories(fixturedefs, parentid))
- if faclist:
- available.add(name)
- if self.argname in available:
- msg = " recursive dependency involving fixture '{}' detected".format(
- self.argname
- )
- else:
- msg = f"fixture '{self.argname}' not found"
- msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
- msg += "\n use 'pytest --fixtures [testpath]' for help on them."
-
- return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
-
-
-class FixtureLookupErrorRepr(TerminalRepr):
- def __init__(
- self,
- filename: Union[str, "os.PathLike[str]"],
- firstlineno: int,
- tblines: Sequence[str],
- errorstring: str,
- argname: Optional[str],
- ) -> None:
- self.tblines = tblines
- self.errorstring = errorstring
- self.filename = filename
- self.firstlineno = firstlineno
- self.argname = argname
-
- def toterminal(self, tw: TerminalWriter) -> None:
- # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
- for tbline in self.tblines:
- tw.line(tbline.rstrip())
- lines = self.errorstring.split("\n")
- if lines:
- tw.line(
- f"{FormattedExcinfo.fail_marker} {lines[0].strip()}",
- red=True,
- )
- for line in lines[1:]:
- tw.line(
- f"{FormattedExcinfo.flow_marker} {line.strip()}",
- red=True,
- )
- tw.line()
- tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1))
-
-
-def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn":
- fs, lineno = getfslineno(fixturefunc)
- location = f"{fs}:{lineno + 1}"
- source = _pytest._code.Source(fixturefunc)
- fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
-
-
-def call_fixture_func(
- fixturefunc: "_FixtureFunc[FixtureValue]", request: FixtureRequest, kwargs
-) -> FixtureValue:
- if is_generator(fixturefunc):
- fixturefunc = cast(
- Callable[..., Generator[FixtureValue, None, None]], fixturefunc
- )
- generator = fixturefunc(**kwargs)
- try:
- fixture_result = next(generator)
- except StopIteration:
- raise ValueError(f"{request.fixturename} did not yield a value") from None
- finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
- request.addfinalizer(finalizer)
- else:
- fixturefunc = cast(Callable[..., FixtureValue], fixturefunc)
- fixture_result = fixturefunc(**kwargs)
- return fixture_result
-
-
-def _teardown_yield_fixture(fixturefunc, it) -> None:
-    """Execute the teardown of a fixture function by advancing the iterator
-    past the yield and ensuring the iteration ends (if it does not, the
-    function has more than one yield)."""
- try:
- next(it)
- except StopIteration:
- pass
- else:
- fail_fixturefunc(fixturefunc, "fixture function has more than one 'yield'")
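
A minimal yield fixture illustrating the single-yield contract that
call_fixture_func and _teardown_yield_fixture above enforce; the resource list
is an illustrative stand-in:

    import pytest

    _open_resources = []   # illustrative stand-in for a real resource pool

    @pytest.fixture
    def resource():
        _open_resources.append("conn")
        # Exactly one yield: code before it is setup, code after it is the
        # teardown driven by _teardown_yield_fixture via next(it).
        yield "conn"
        _open_resources.pop()

    def test_resource_open(resource):
        assert resource == "conn"
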
-
-
-def _eval_scope_callable(
- scope_callable: "Callable[[str, Config], _ScopeName]",
- fixture_name: str,
- config: Config,
-) -> "_ScopeName":
- try:
- # Type ignored because there is no typing mechanism to specify
- # keyword arguments, currently.
- result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg]
- except Exception as e:
- raise TypeError(
- "Error evaluating {} while defining fixture '{}'.\n"
- "Expected a function with the signature (*, fixture_name, config)".format(
- scope_callable, fixture_name
- )
- ) from e
- if not isinstance(result, str):
- fail(
- "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"
- "{!r}".format(scope_callable, fixture_name, result),
- pytrace=False,
- )
- return result
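
A sketch of the dynamic-scope callable that _eval_scope_callable above expects;
the environment variable and fixture name are illustrative:

    import os

    import pytest

    def decide_scope(fixture_name, config):
        # Called with fixture_name and config as keyword arguments; must
        # return one of the scope strings validated above.
        return "session" if os.environ.get("REUSE_CONTAINERS") else "function"

    @pytest.fixture(scope=decide_scope)
    def container():
        return object()   # illustrative stand-in for a container handle
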
-
-
-@final
-class FixtureDef(Generic[FixtureValue]):
- """A container for a fixture definition."""
-
- def __init__(
- self,
- fixturemanager: "FixtureManager",
- baseid: Optional[str],
- argname: str,
- func: "_FixtureFunc[FixtureValue]",
- scope: Union[Scope, "_ScopeName", Callable[[str, Config], "_ScopeName"], None],
- params: Optional[Sequence[object]],
- unittest: bool = False,
- ids: Optional[
- Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]
- ] = None,
- ) -> None:
- self._fixturemanager = fixturemanager
- # The "base" node ID for the fixture.
- #
- # This is a node ID prefix. A fixture is only available to a node (e.g.
- # a `Function` item) if the fixture's baseid is a parent of the node's
- # nodeid (see the `iterparentnodeids` function for what constitutes a
- # "parent" and a "prefix" in this context).
- #
- # For a fixture found in a Collector's object (e.g. a `Module`s module,
- # a `Class`'s class), the baseid is the Collector's nodeid.
- #
- # For a fixture found in a conftest plugin, the baseid is the conftest's
- # directory path relative to the rootdir.
- #
- # For other plugins, the baseid is the empty string (always matches).
- self.baseid = baseid or ""
- # Whether the fixture was found from a node or a conftest in the
- # collection tree. Will be false for fixtures defined in non-conftest
- # plugins.
- self.has_location = baseid is not None
- # The fixture factory function.
- self.func = func
- # The name by which the fixture may be requested.
- self.argname = argname
- if scope is None:
- scope = Scope.Function
- elif callable(scope):
- scope = _eval_scope_callable(scope, argname, fixturemanager.config)
- if isinstance(scope, str):
- scope = Scope.from_user(
- scope, descr=f"Fixture '{func.__name__}'", where=baseid
- )
- self._scope = scope
- # If the fixture is directly parametrized, the parameter values.
- self.params: Optional[Sequence[object]] = params
- # If the fixture is directly parametrized, a tuple of explicit IDs to
- # assign to the parameter values, or a callable to generate an ID given
- # a parameter value.
- self.ids = ids
- # The names requested by the fixtures.
- self.argnames = getfuncargnames(func, name=argname, is_method=unittest)
- # Whether the fixture was collected from a unittest TestCase class.
- # Note that it really only makes sense to define autouse fixtures in
- # unittest TestCases.
- self.unittest = unittest
- # If the fixture was executed, the current value of the fixture.
- # Can change if the fixture is executed with different parameters.
- self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None
- self._finalizers: List[Callable[[], object]] = []
-
- @property
- def scope(self) -> "_ScopeName":
- """Scope string, one of "function", "class", "module", "package", "session"."""
- return self._scope.value
-
- def addfinalizer(self, finalizer: Callable[[], object]) -> None:
- self._finalizers.append(finalizer)
-
- def finish(self, request: SubRequest) -> None:
- exc = None
- try:
- while self._finalizers:
- try:
- func = self._finalizers.pop()
- func()
- except BaseException as e:
- # XXX Only first exception will be seen by user,
- # ideally all should be reported.
- if exc is None:
- exc = e
- if exc:
- raise exc
- finally:
- ihook = request.node.ihook
- ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
- # Even if finalization fails, we invalidate the cached fixture
- # value and remove all finalizers because they may be bound methods
- # which will keep instances alive.
- self.cached_result = None
- self._finalizers = []
-
- def execute(self, request: SubRequest) -> FixtureValue:
- # Get required arguments and register our own finish()
- # with their finalization.
- for argname in self.argnames:
- fixturedef = request._get_active_fixturedef(argname)
- if argname != "request":
- # PseudoFixtureDef is only for "request".
- assert isinstance(fixturedef, FixtureDef)
- fixturedef.addfinalizer(functools.partial(self.finish, request=request))
-
- my_cache_key = self.cache_key(request)
- if self.cached_result is not None:
- # note: comparison with `==` can fail (or be expensive) for e.g.
- # numpy arrays (#6497).
- cache_key = self.cached_result[1]
- if my_cache_key is cache_key:
- if self.cached_result[2] is not None:
- _, val, tb = self.cached_result[2]
- raise val.with_traceback(tb)
- else:
- result = self.cached_result[0]
- return result
- # We have a previous but differently parametrized fixture instance
- # so we need to tear it down before creating a new one.
- self.finish(request)
- assert self.cached_result is None
-
- ihook = request.node.ihook
- result = ihook.pytest_fixture_setup(fixturedef=self, request=request)
- return result
-
- def cache_key(self, request: SubRequest) -> object:
- return request.param_index if not hasattr(request, "param") else request.param
-
- def __repr__(self) -> str:
- return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
- self.argname, self.scope, self.baseid
- )
-
-
-def resolve_fixture_function(
- fixturedef: FixtureDef[FixtureValue], request: FixtureRequest
-) -> "_FixtureFunc[FixtureValue]":
- """Get the actual callable that can be called to obtain the fixture
- value, dealing with unittest-specific instances and bound methods."""
- fixturefunc = fixturedef.func
- if fixturedef.unittest:
- if request.instance is not None:
- # Bind the unbound method to the TestCase instance.
- fixturefunc = fixturedef.func.__get__(request.instance) # type: ignore[union-attr]
- else:
- # The fixture function needs to be bound to the actual
- # request.instance so that code working with "fixturedef" behaves
- # as expected.
- if request.instance is not None:
- # Handle the case where fixture is defined not in a test class, but some other class
- # (for example a plugin class with a fixture), see #2270.
- if hasattr(fixturefunc, "__self__") and not isinstance(
- request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr]
- ):
- return fixturefunc
- fixturefunc = getimfunc(fixturedef.func)
- if fixturefunc != fixturedef.func:
- fixturefunc = fixturefunc.__get__(request.instance) # type: ignore[union-attr]
- return fixturefunc
-
-
-def pytest_fixture_setup(
- fixturedef: FixtureDef[FixtureValue], request: SubRequest
-) -> FixtureValue:
- """Execution of fixture setup."""
- kwargs = {}
- for argname in fixturedef.argnames:
- fixdef = request._get_active_fixturedef(argname)
- assert fixdef.cached_result is not None
- result, arg_cache_key, exc = fixdef.cached_result
- request._check_scope(argname, request._scope, fixdef._scope)
- kwargs[argname] = result
-
- fixturefunc = resolve_fixture_function(fixturedef, request)
- my_cache_key = fixturedef.cache_key(request)
- try:
- result = call_fixture_func(fixturefunc, request, kwargs)
- except TEST_OUTCOME:
- exc_info = sys.exc_info()
- assert exc_info[0] is not None
- fixturedef.cached_result = (None, my_cache_key, exc_info)
- raise
- fixturedef.cached_result = (result, my_cache_key, None)
- return result
-
-
-def _ensure_immutable_ids(
- ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]]
-) -> Optional[Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]]:
- if ids is None:
- return None
- if callable(ids):
- return ids
- return tuple(ids)
-
-
-def _params_converter(
- params: Optional[Iterable[object]],
-) -> Optional[Tuple[object, ...]]:
- return tuple(params) if params is not None else None
-
-
-def wrap_function_to_error_out_if_called_directly(
- function: FixtureFunction,
- fixture_marker: "FixtureFunctionMarker",
-) -> FixtureFunction:
- """Wrap the given fixture function so we can raise an error about it being called directly,
- instead of being used as an argument in a test function."""
- message = (
- 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
- "but are created automatically when test functions request them as parameters.\n"
- "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n"
- "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code."
- ).format(name=fixture_marker.name or function.__name__)
-
- @functools.wraps(function)
- def result(*args, **kwargs):
- fail(message, pytrace=False)
-
- # Keep reference to the original function in our own custom attribute so we don't unwrap
- # further than this point and lose useful wrappings like @mock.patch (#3774).
- result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined]
-
- return cast(FixtureFunction, result)
-
-
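As a quick illustration of the effect of this wrapping (a minimal sketch, not part of this module; the fixture name is made up), a fixture function called directly from test code fails immediately instead of returning a value:

import pytest

@pytest.fixture
def sample_data():
    return {"answer": 42}

def test_direct_call():
    # The decorated name now points at the error-raising wrapper, so this
    # call fails with: Fixture "sample_data" called directly. ...
    sample_data()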
-@final
-@attr.s(frozen=True, auto_attribs=True)
-class FixtureFunctionMarker:
- scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"
- params: Optional[Tuple[object, ...]] = attr.ib(converter=_params_converter)
- autouse: bool = False
- ids: Optional[
- Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]
- ] = attr.ib(
- default=None,
- converter=_ensure_immutable_ids,
- )
- name: Optional[str] = None
-
- def __call__(self, function: FixtureFunction) -> FixtureFunction:
- if inspect.isclass(function):
- raise ValueError("class fixtures not supported (maybe in the future)")
-
- if getattr(function, "_pytestfixturefunction", False):
- raise ValueError(
- "fixture is being applied more than once to the same function"
- )
-
- function = wrap_function_to_error_out_if_called_directly(function, self)
-
- name = self.name or function.__name__
- if name == "request":
- location = getlocation(function)
- fail(
- "'request' is a reserved word for fixtures, use another name:\n {}".format(
- location
- ),
- pytrace=False,
- )
-
- # Type ignored because https://github.com/python/mypy/issues/2087.
- function._pytestfixturefunction = self # type: ignore[attr-defined]
- return function
-
-
-@overload
-def fixture(
- fixture_function: FixtureFunction,
- *,
- scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ...,
- params: Optional[Iterable[object]] = ...,
- autouse: bool = ...,
- ids: Optional[
- Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
- ] = ...,
- name: Optional[str] = ...,
-) -> FixtureFunction:
- ...
-
-
-@overload
-def fixture(
- fixture_function: None = ...,
- *,
- scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ...,
- params: Optional[Iterable[object]] = ...,
- autouse: bool = ...,
- ids: Optional[
- Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
- ] = ...,
- name: Optional[str] = None,
-) -> FixtureFunctionMarker:
- ...
-
-
-def fixture(
- fixture_function: Optional[FixtureFunction] = None,
- *,
- scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = "function",
- params: Optional[Iterable[object]] = None,
- autouse: bool = False,
- ids: Optional[
- Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
- ] = None,
- name: Optional[str] = None,
-) -> Union[FixtureFunctionMarker, FixtureFunction]:
- """Decorator to mark a fixture factory function.
-
- This decorator can be used, with or without parameters, to define a
- fixture function.
-
- The name of the fixture function can later be referenced to cause its
- invocation ahead of running tests: test modules or classes can use the
- ``pytest.mark.usefixtures(fixturename)`` marker.
-
- Test functions can directly use fixture names as input arguments in which
- case the fixture instance returned from the fixture function will be
- injected.
-
- Fixtures can provide their values to test functions using ``return`` or
- ``yield`` statements. When using ``yield`` the code block after the
- ``yield`` statement is executed as teardown code regardless of the test
- outcome, and must yield exactly once.
-
- :param scope:
- The scope for which this fixture is shared; one of ``"function"``
- (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
-
- This parameter may also be a callable which receives ``(fixture_name, config)``
- as parameters, and must return a ``str`` with one of the values mentioned above.
-
- See :ref:`dynamic scope` in the docs for more information.
-
- :param params:
- An optional list of parameters which will cause multiple invocations
- of the fixture function and all of the tests using it. The current
- parameter is available in ``request.param``.
-
- :param autouse:
- If True, the fixture func is activated for all tests that can see it.
- If False (the default), an explicit reference is needed to activate
- the fixture.
-
- :param ids:
- Sequence of ids each corresponding to the params so that they are
- part of the test id. If no ids are provided they will be generated
- automatically from the params.
-
- :param name:
- The name of the fixture. This defaults to the name of the decorated
- function. If a fixture is used in the same module in which it is
- defined, the function name of the fixture will be shadowed by the
- function arg that requests the fixture; one way to resolve this is to
- name the decorated function ``fixture_<fixturename>`` and then use
- ``@pytest.fixture(name='<fixturename>')``.
- """
- fixture_marker = FixtureFunctionMarker(
- scope=scope,
- params=params,
- autouse=autouse,
- ids=ids,
- name=name,
- )
-
- # Direct decoration.
- if fixture_function:
- return fixture_marker(fixture_function)
-
- return fixture_marker
-
-
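For reference, a hedged sketch of the decorator forms described in the docstring above (fixture names and parameter values are illustrative):

import pytest

# Direct decoration: function-scoped fixture returning a value.
@pytest.fixture
def config_path(tmp_path):
    return tmp_path / "app.cfg"

# Decoration with arguments: a parametrized, module-scoped fixture with
# explicit ids; the code after `yield` runs as teardown regardless of
# the test outcome.
@pytest.fixture(scope="module", params=["sqlite", "postgres"], ids=["lite", "pg"])
def database(request):
    db = {"backend": request.param}
    yield db
    db.clear()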
-def yield_fixture(
- fixture_function=None,
- *args,
- scope="function",
- params=None,
- autouse=False,
- ids=None,
- name=None,
-):
- """(Return a) decorator to mark a yield-fixture factory function.
-
- .. deprecated:: 3.0
- Use :py:func:`pytest.fixture` directly instead.
- """
- warnings.warn(YIELD_FIXTURE, stacklevel=2)
- return fixture(
- fixture_function,
- *args,
- scope=scope,
- params=params,
- autouse=autouse,
- ids=ids,
- name=name,
- )
-
-
-@fixture(scope="session")
-def pytestconfig(request: FixtureRequest) -> Config:
- """Session-scoped fixture that returns the session's :class:`pytest.Config`
- object.
-
- Example::
-
- def test_foo(pytestconfig):
- if pytestconfig.getoption("verbose") > 0:
- ...
-
- """
- return request.config
-
-
-def pytest_addoption(parser: Parser) -> None:
- parser.addini(
- "usefixtures",
- type="args",
- default=[],
- help="list of default fixtures to be used with this project",
- )
-
-
-class FixtureManager:
- """pytest fixture definitions and information is stored and managed
- from this class.
-
- During collection fm.parsefactories() is called multiple times to parse
- fixture function definitions into FixtureDef objects and internal
- data structures.
-
- During collection of test functions, metafunc-mechanics instantiate
- a FuncFixtureInfo object which is cached per node/func-name.
- This FuncFixtureInfo object is later retrieved by Function nodes
- which themselves offer a fixturenames attribute.
-
- The FuncFixtureInfo object holds information about fixtures and FixtureDefs
- relevant for a particular function. An initial list of fixtures is
- assembled like this:
-
- - ini-defined usefixtures
- - autouse-marked fixtures along the collection chain up from the function
- - usefixtures markers at module/class/function level
- - test function funcargs
-
- Subsequently the funcfixtureinfo.fixturenames attribute is computed
- as the closure of the fixtures needed to setup the initial fixtures,
- i.e. fixtures needed by fixture functions themselves are appended
- to the fixturenames list.
-
- During the test-setup phase all fixturenames are instantiated, retrieved
- by a lookup of their FuncFixtureInfo.
- """
-
- FixtureLookupError = FixtureLookupError
- FixtureLookupErrorRepr = FixtureLookupErrorRepr
-
- def __init__(self, session: "Session") -> None:
- self.session = session
- self.config: Config = session.config
- self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {}
- self._holderobjseen: Set[object] = set()
- # A mapping from a nodeid to a list of autouse fixtures it defines.
- self._nodeid_autousenames: Dict[str, List[str]] = {
- "": self.config.getini("usefixtures"),
- }
- session.config.pluginmanager.register(self, "funcmanage")
-
- def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]:
- """Return all direct parametrization arguments of a node, so we don't
- mistake them for fixtures.
-
- Check https://github.com/pytest-dev/pytest/issues/5036.
-
- These things are done later as well when dealing with parametrization
- so this could be improved.
- """
- parametrize_argnames: List[str] = []
- for marker in node.iter_markers(name="parametrize"):
- if not marker.kwargs.get("indirect", False):
- p_argnames, _ = ParameterSet._parse_parametrize_args(
- *marker.args, **marker.kwargs
- )
- parametrize_argnames.extend(p_argnames)
-
- return parametrize_argnames
-
- def getfixtureinfo(
- self, node: nodes.Node, func, cls, funcargs: bool = True
- ) -> FuncFixtureInfo:
- if funcargs and not getattr(node, "nofuncargs", False):
- argnames = getfuncargnames(func, name=node.name, cls=cls)
- else:
- argnames = ()
-
- usefixtures = tuple(
- arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args
- )
- initialnames = usefixtures + argnames
- fm = node.session._fixturemanager
- initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
- initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
- )
- return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
-
- def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
- nodeid = None
- try:
- p = absolutepath(plugin.__file__) # type: ignore[attr-defined]
- except AttributeError:
- pass
- else:
- # Construct the base nodeid which is later used to check
- # what fixtures are visible for particular tests (as denoted
- # by their test id).
- if p.name.startswith("conftest.py"):
- try:
- nodeid = str(p.parent.relative_to(self.config.rootpath))
- except ValueError:
- nodeid = ""
- if nodeid == ".":
- nodeid = ""
- if os.sep != nodes.SEP:
- nodeid = nodeid.replace(os.sep, nodes.SEP)
-
- self.parsefactories(plugin, nodeid)
-
- def _getautousenames(self, nodeid: str) -> Iterator[str]:
- """Return the names of autouse fixtures applicable to nodeid."""
- for parentnodeid in nodes.iterparentnodeids(nodeid):
- basenames = self._nodeid_autousenames.get(parentnodeid)
- if basenames:
- yield from basenames
-
- def getfixtureclosure(
- self,
- fixturenames: Tuple[str, ...],
- parentnode: nodes.Node,
- ignore_args: Sequence[str] = (),
- ) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]:
- # Collect the closure of all fixtures, starting with the given
- # fixturenames as the initial set. As we have to visit all
- # factory definitions anyway, we also return an arg2fixturedefs
- # mapping so that the caller can reuse it and does not have
- # to re-discover fixturedefs again for each fixturename
- # (discovering matching fixtures for a given name/node is expensive).
-
- parentid = parentnode.nodeid
- fixturenames_closure = list(self._getautousenames(parentid))
-
- def merge(otherlist: Iterable[str]) -> None:
- for arg in otherlist:
- if arg not in fixturenames_closure:
- fixturenames_closure.append(arg)
-
- merge(fixturenames)
-
- # At this point, fixturenames_closure contains what we call "initialnames",
- # which is a set of fixturenames the function immediately requests. We
- # need to return it as well, so save this.
- initialnames = tuple(fixturenames_closure)
-
- arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {}
- lastlen = -1
- while lastlen != len(fixturenames_closure):
- lastlen = len(fixturenames_closure)
- for argname in fixturenames_closure:
- if argname in ignore_args:
- continue
- if argname in arg2fixturedefs:
- continue
- fixturedefs = self.getfixturedefs(argname, parentid)
- if fixturedefs:
- arg2fixturedefs[argname] = fixturedefs
- merge(fixturedefs[-1].argnames)
-
- def sort_by_scope(arg_name: str) -> Scope:
- try:
- fixturedefs = arg2fixturedefs[arg_name]
- except KeyError:
- return Scope.Function
- else:
- return fixturedefs[-1]._scope
-
- fixturenames_closure.sort(key=sort_by_scope, reverse=True)
- return initialnames, fixturenames_closure, arg2fixturedefs
-
- def pytest_generate_tests(self, metafunc: "Metafunc") -> None:
- """Generate new tests based on parametrized fixtures used by the given metafunc"""
-
- def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]:
- args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs)
- return args
-
- for argname in metafunc.fixturenames:
- # Get the FixtureDefs for the argname.
- fixture_defs = metafunc._arg2fixturedefs.get(argname)
- if not fixture_defs:
- # Will raise FixtureLookupError at setup time if not parametrized somewhere
- # else (e.g @pytest.mark.parametrize)
- continue
-
- # If the test itself parametrizes using this argname, give it
- # precedence.
- if any(
- argname in get_parametrize_mark_argnames(mark)
- for mark in metafunc.definition.iter_markers("parametrize")
- ):
- continue
-
- # In the common case we only look at the fixture def with the
- # closest scope (last in the list). But if the fixture overrides
- # another fixture, while requesting the super fixture, keep going
- # in case the super fixture is parametrized (#1953).
- for fixturedef in reversed(fixture_defs):
- # Fixture is parametrized, apply it and stop.
- if fixturedef.params is not None:
- metafunc.parametrize(
- argname,
- fixturedef.params,
- indirect=True,
- scope=fixturedef.scope,
- ids=fixturedef.ids,
- )
- break
-
- # Not requesting the overridden super fixture, stop.
- if argname not in fixturedef.argnames:
- break
-
- # Try next super fixture, if any.
-
- def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None:
- # Separate parametrized setups.
- items[:] = reorder_items(items)
-
- def parsefactories(
- self, node_or_obj, nodeid=NOTSET, unittest: bool = False
- ) -> None:
- if nodeid is not NOTSET:
- holderobj = node_or_obj
- else:
- holderobj = node_or_obj.obj
- nodeid = node_or_obj.nodeid
- if holderobj in self._holderobjseen:
- return
-
- self._holderobjseen.add(holderobj)
- autousenames = []
- for name in dir(holderobj):
- # Ugly workaround for the deprecated fspath property of Node.
- # TODO: safely generalize.
- if isinstance(holderobj, nodes.Node) and name == "fspath":
- continue
-
- # The attribute can be an arbitrary descriptor, so the attribute
- # access below can raise. safe_getattr() ignores such exceptions.
- obj = safe_getattr(holderobj, name, None)
- marker = getfixturemarker(obj)
- if not isinstance(marker, FixtureFunctionMarker):
- # Magic globals with __getattr__ might have got us a wrong
- # fixture attribute.
- continue
-
- if marker.name:
- name = marker.name
-
- # During fixture definition we wrap the original fixture function
- # to issue a warning if called directly, so here we unwrap it in
- # order to not emit the warning when pytest itself calls the
- # fixture function.
- obj = get_real_method(obj, holderobj)
-
- fixture_def = FixtureDef(
- fixturemanager=self,
- baseid=nodeid,
- argname=name,
- func=obj,
- scope=marker.scope,
- params=marker.params,
- unittest=unittest,
- ids=marker.ids,
- )
-
- faclist = self._arg2fixturedefs.setdefault(name, [])
- if fixture_def.has_location:
- faclist.append(fixture_def)
- else:
- # fixturedefs with no location are at the front
- # so this inserts the current fixturedef after the
- # existing fixturedefs from external plugins but
- # before the fixturedefs provided in conftests.
- i = len([f for f in faclist if not f.has_location])
- faclist.insert(i, fixture_def)
- if marker.autouse:
- autousenames.append(name)
-
- if autousenames:
- self._nodeid_autousenames.setdefault(nodeid or "", []).extend(autousenames)
-
- def getfixturedefs(
- self, argname: str, nodeid: str
- ) -> Optional[Sequence[FixtureDef[Any]]]:
- """Get a list of fixtures which are applicable to the given node id.
-
- :param str argname: Name of the fixture to search for.
- :param str nodeid: Full node id of the requesting test.
- :rtype: Sequence[FixtureDef]
- """
- try:
- fixturedefs = self._arg2fixturedefs[argname]
- except KeyError:
- return None
- return tuple(self._matchfactories(fixturedefs, nodeid))
-
- def _matchfactories(
- self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str
- ) -> Iterator[FixtureDef[Any]]:
- parentnodeids = set(nodes.iterparentnodeids(nodeid))
- for fixturedef in fixturedefs:
- if fixturedef.baseid in parentnodeids:
- yield fixturedef
diff --git a/contrib/python/pytest/py3/_pytest/freeze_support.py b/contrib/python/pytest/py3/_pytest/freeze_support.py
deleted file mode 100644
index 9f8ea231fe..0000000000
--- a/contrib/python/pytest/py3/_pytest/freeze_support.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Provides a function to report all internal modules for using freezing
-tools."""
-import types
-from typing import Iterator
-from typing import List
-from typing import Union
-
-
-def freeze_includes() -> List[str]:
- """Return a list of module names used by pytest that should be
- included by cx_freeze."""
- import _pytest
-
- result = list(_iter_all_modules(_pytest))
- return result
-
-
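A sketch of how freeze_includes() is typically consumed from a cx_Freeze setup script (it is exposed publicly as pytest.freeze_includes; the script name and option layout follow cx_Freeze conventions and are assumptions, not part of this module):

from cx_Freeze import Executable, setup

import pytest

setup(
    name="frozen-test-runner",
    executables=[Executable("runtests.py")],
    options={
        "build_exe": {
            # Bundle all internal pytest modules into the frozen build.
            "includes": pytest.freeze_includes(),
        },
    },
)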
-def _iter_all_modules(
- package: Union[str, types.ModuleType],
- prefix: str = "",
-) -> Iterator[str]:
- """Iterate over the names of all modules that can be found in the given
- package, recursively.
-
- >>> import _pytest
- >>> list(_iter_all_modules(_pytest))
- ['_pytest._argcomplete', '_pytest._code.code', ...]
- """
- import os
- import pkgutil
-
- if isinstance(package, str):
- path = package
- else:
- # Type ignored because typeshed doesn't define ModuleType.__path__
- # (only defined on packages).
- package_path = package.__path__ # type: ignore[attr-defined]
- path, prefix = package_path[0], package.__name__ + "."
- for _, name, is_package in pkgutil.iter_modules([path]):
- if is_package:
- for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
- yield prefix + m
- else:
- yield prefix + name
diff --git a/contrib/python/pytest/py3/_pytest/helpconfig.py b/contrib/python/pytest/py3/_pytest/helpconfig.py
deleted file mode 100644
index aca2cd391e..0000000000
--- a/contrib/python/pytest/py3/_pytest/helpconfig.py
+++ /dev/null
@@ -1,264 +0,0 @@
-"""Version info, help messages, tracing configuration."""
-import os
-import sys
-from argparse import Action
-from typing import List
-from typing import Optional
-from typing import Union
-
-import pytest
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import PrintHelp
-from _pytest.config.argparsing import Parser
-
-
-class HelpAction(Action):
- """An argparse Action that will raise an exception in order to skip the
- rest of the argument parsing when --help is passed.
-
- This prevents argparse from quitting due to missing required arguments
- when any are defined, for example by ``pytest_addoption``.
- This is similar to the way that the builtin argparse --help option is
- implemented by raising SystemExit.
- """
-
- def __init__(self, option_strings, dest=None, default=False, help=None):
- super().__init__(
- option_strings=option_strings,
- dest=dest,
- const=True,
- default=default,
- nargs=0,
- help=help,
- )
-
- def __call__(self, parser, namespace, values, option_string=None):
- setattr(namespace, self.dest, self.const)
-
- # We should only skip the rest of the parsing after preparse is done.
- if getattr(parser._parser, "after_preparse", False):
- raise PrintHelp
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("debugconfig")
- group.addoption(
- "--version",
- "-V",
- action="count",
- default=0,
- dest="version",
- help="display pytest version and information about plugins. "
- "When given twice, also display information about plugins.",
- )
- group._addoption(
- "-h",
- "--help",
- action=HelpAction,
- dest="help",
- help="show help message and configuration info",
- )
- group._addoption(
- "-p",
- action="append",
- dest="plugins",
- default=[],
- metavar="name",
- help="early-load given plugin module name or entry point (multi-allowed).\n"
- "To avoid loading of plugins, use the `no:` prefix, e.g. "
- "`no:doctest`.",
- )
- group.addoption(
- "--traceconfig",
- "--trace-config",
- action="store_true",
- default=False,
- help="trace considerations of conftest.py files.",
- )
- group.addoption(
- "--debug",
- action="store",
- nargs="?",
- const="pytestdebug.log",
- dest="debug",
- metavar="DEBUG_FILE_NAME",
- help="store internal tracing debug information in this log file.\n"
- "This file is opened with 'w' and truncated as a result, care advised.\n"
- "Defaults to 'pytestdebug.log'.",
- )
- group._addoption(
- "-o",
- "--override-ini",
- dest="override_ini",
- action="append",
- help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
- )
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_cmdline_parse():
- outcome = yield
- config: Config = outcome.get_result()
-
- if config.option.debug:
- # --debug | --debug <file.log> was provided.
- path = config.option.debug
- debugfile = open(path, "w")
- debugfile.write(
- "versions pytest-%s, "
- "python-%s\ncwd=%s\nargs=%s\n\n"
- % (
- pytest.__version__,
- ".".join(map(str, sys.version_info)),
- os.getcwd(),
- config.invocation_params.args,
- )
- )
- config.trace.root.setwriter(debugfile.write)
- undo_tracing = config.pluginmanager.enable_tracing()
- sys.stderr.write("writing pytest debug information to %s\n" % path)
-
- def unset_tracing() -> None:
- debugfile.close()
- sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name)
- config.trace.root.setwriter(None)
- undo_tracing()
-
- config.add_cleanup(unset_tracing)
-
-
-def showversion(config: Config) -> None:
- if config.option.version > 1:
- sys.stdout.write(
- "This is pytest version {}, imported from {}\n".format(
- pytest.__version__, pytest.__file__
- )
- )
- plugininfo = getpluginversioninfo(config)
- if plugininfo:
- for line in plugininfo:
- sys.stdout.write(line + "\n")
- else:
- sys.stdout.write(f"pytest {pytest.__version__}\n")
-
-
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- if config.option.version > 0:
- showversion(config)
- return 0
- elif config.option.help:
- config._do_configure()
- showhelp(config)
- config._ensure_unconfigure()
- return 0
- return None
-
-
-def showhelp(config: Config) -> None:
- import textwrap
-
- reporter = config.pluginmanager.get_plugin("terminalreporter")
- tw = reporter._tw
- tw.write(config._parser.optparser.format_help())
- tw.line()
- tw.line(
- "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:"
- )
- tw.line()
-
- columns = tw.fullwidth # costly call
- indent_len = 24 # based on argparse's max_help_position=24
- indent = " " * indent_len
- for name in config._parser._ininames:
- help, type, default = config._parser._inidict[name]
- if type is None:
- type = "string"
- if help is None:
- raise TypeError(f"help argument cannot be None for {name}")
- spec = f"{name} ({type}):"
- tw.write(" %s" % spec)
- spec_len = len(spec)
- if spec_len > (indent_len - 3):
- # Display help starting at a new line.
- tw.line()
- helplines = textwrap.wrap(
- help,
- columns,
- initial_indent=indent,
- subsequent_indent=indent,
- break_on_hyphens=False,
- )
-
- for line in helplines:
- tw.line(line)
- else:
- # Display help starting after the spec, following lines indented.
- tw.write(" " * (indent_len - spec_len - 2))
- wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False)
-
- if wrapped:
- tw.line(wrapped[0])
- for line in wrapped[1:]:
- tw.line(indent + line)
-
- tw.line()
- tw.line("environment variables:")
- vars = [
- ("PYTEST_ADDOPTS", "extra command line options"),
- ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
- ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"),
- ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
- ]
- for name, help in vars:
- tw.line(f" {name:<24} {help}")
- tw.line()
- tw.line()
-
- tw.line("to see available markers type: pytest --markers")
- tw.line("to see available fixtures type: pytest --fixtures")
- tw.line(
- "(shown according to specified file_or_dir or current dir "
- "if not specified; fixtures with leading '_' are only shown "
- "with the '-v' option"
- )
-
- for warningreport in reporter.stats.get("warnings", []):
- tw.line("warning : " + warningreport.message, red=True)
- return
-
-
-conftest_options = [("pytest_plugins", "list of plugin names to load")]
-
-
-def getpluginversioninfo(config: Config) -> List[str]:
- lines = []
- plugininfo = config.pluginmanager.list_plugin_distinfo()
- if plugininfo:
- lines.append("setuptools registered plugins:")
- for plugin, dist in plugininfo:
- loc = getattr(plugin, "__file__", repr(plugin))
- content = f"{dist.project_name}-{dist.version} at {loc}"
- lines.append(" " + content)
- return lines
-
-
-def pytest_report_header(config: Config) -> List[str]:
- lines = []
- if config.option.debug or config.option.traceconfig:
- lines.append(f"using: pytest-{pytest.__version__}")
-
- verinfo = getpluginversioninfo(config)
- if verinfo:
- lines.extend(verinfo)
-
- if config.option.traceconfig:
- lines.append("active plugins:")
- items = config.pluginmanager.list_name_plugin()
- for name, plugin in items:
- if hasattr(plugin, "__file__"):
- r = plugin.__file__
- else:
- r = repr(plugin)
- lines.append(f" {name:<20}: {r}")
- return lines
diff --git a/contrib/python/pytest/py3/_pytest/hookspec.py b/contrib/python/pytest/py3/_pytest/hookspec.py
deleted file mode 100644
index a03c0e9ab7..0000000000
--- a/contrib/python/pytest/py3/_pytest/hookspec.py
+++ /dev/null
@@ -1,892 +0,0 @@
-"""Hook specifications for pytest plugins which are invoked by pytest itself
-and by builtin plugins."""
-from pathlib import Path
-from typing import Any
-from typing import Dict
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-from pluggy import HookspecMarker
-
-from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK
-
-if TYPE_CHECKING:
- import pdb
- import warnings
- from typing_extensions import Literal
-
- from _pytest._code.code import ExceptionRepr
- from _pytest._code import ExceptionInfo
- from _pytest.config import Config
- from _pytest.config import ExitCode
- from _pytest.config import PytestPluginManager
- from _pytest.config import _PluggyPlugin
- from _pytest.config.argparsing import Parser
- from _pytest.fixtures import FixtureDef
- from _pytest.fixtures import SubRequest
- from _pytest.main import Session
- from _pytest.nodes import Collector
- from _pytest.nodes import Item
- from _pytest.outcomes import Exit
- from _pytest.python import Class
- from _pytest.python import Function
- from _pytest.python import Metafunc
- from _pytest.python import Module
- from _pytest.reports import CollectReport
- from _pytest.reports import TestReport
- from _pytest.runner import CallInfo
- from _pytest.terminal import TerminalReporter
- from _pytest.compat import LEGACY_PATH
-
-
-hookspec = HookspecMarker("pytest")
-
-# -------------------------------------------------------------------------
-# Initialization hooks called for every plugin
-# -------------------------------------------------------------------------
-
-
-@hookspec(historic=True)
-def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
- """Called at plugin registration time to allow adding new hooks via a call to
- ``pluginmanager.add_hookspecs(module_or_class, prefix)``.
-
- :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager.
-
- .. note::
- This hook is incompatible with ``hookwrapper=True``.
- """
-
-
-@hookspec(historic=True)
-def pytest_plugin_registered(
- plugin: "_PluggyPlugin", manager: "PytestPluginManager"
-) -> None:
- """A new pytest plugin got registered.
-
- :param plugin: The plugin module or instance.
- :param pytest.PytestPluginManager manager: pytest plugin manager.
-
- .. note::
- This hook is incompatible with ``hookwrapper=True``.
- """
-
-
-@hookspec(historic=True)
-def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None:
- """Register argparse-style options and ini-style config values,
- called once at the beginning of a test run.
-
- .. note::
-
- This function should be implemented only in plugins or ``conftest.py``
- files situated at the tests root directory due to how pytest
- :ref:`discovers plugins during startup <pluginorder>`.
-
- :param pytest.Parser parser:
- To add command line options, call
- :py:func:`parser.addoption(...) <pytest.Parser.addoption>`.
- To add ini-file values call :py:func:`parser.addini(...)
- <pytest.Parser.addini>`.
-
- :param pytest.PytestPluginManager pluginmanager:
- The pytest plugin manager, which can be used to install :py:func:`hookspec`'s
- or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks
- to change how command line options are added.
-
- Options can later be accessed through the
- :py:class:`config <pytest.Config>` object, respectively:
-
- - :py:func:`config.getoption(name) <pytest.Config.getoption>` to
- retrieve the value of a command line option.
-
- - :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve
- a value read from an ini-style file.
-
- The config object is passed around on many internal objects via the ``.config``
- attribute or can be retrieved as the ``pytestconfig`` fixture.
-
- .. note::
- This hook is incompatible with ``hookwrapper=True``.
- """
-
-
-@hookspec(historic=True)
-def pytest_configure(config: "Config") -> None:
- """Allow plugins and conftest files to perform initial configuration.
-
- This hook is called for every plugin and initial conftest file
- after command line options have been parsed.
-
- After that, the hook is called for other conftest files as they are
- imported.
-
- .. note::
- This hook is incompatible with ``hookwrapper=True``.
-
- :param pytest.Config config: The pytest config object.
- """
-
-
-# -------------------------------------------------------------------------
-# Bootstrapping hooks called for plugins registered early enough:
-# internal and 3rd party plugins.
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_cmdline_parse(
- pluginmanager: "PytestPluginManager", args: List[str]
-) -> Optional["Config"]:
- """Return an initialized config object, parsing the specified args.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- .. note::
- This hook will only be called for plugin classes passed to the
- ``plugins`` arg when using `pytest.main`_ to perform an in-process
- test run.
-
- :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager.
- :param List[str] args: List of arguments passed on the command line.
- """
-
-
-@hookspec(warn_on_impl=WARNING_CMDLINE_PREPARSE_HOOK)
-def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None:
- """(**Deprecated**) modify command line arguments before option parsing.
-
- This hook is considered deprecated and will be removed in a future pytest version. Consider
- using :hook:`pytest_load_initial_conftests` instead.
-
- .. note::
- This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
-
- :param pytest.Config config: The pytest config object.
- :param List[str] args: Arguments passed on the command line.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]:
- """Called for performing the main command line action. The default
- implementation will invoke the configure hooks and runtest_mainloop.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- :param pytest.Config config: The pytest config object.
- """
-
-
-def pytest_load_initial_conftests(
- early_config: "Config", parser: "Parser", args: List[str]
-) -> None:
- """Called to implement the loading of initial conftest files ahead
- of command line option parsing.
-
- .. note::
- This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
-
- :param pytest.Config early_config: The pytest config object.
- :param List[str] args: Arguments passed on the command line.
- :param pytest.Parser parser: To add command line options.
- """
-
-
-# -------------------------------------------------------------------------
-# collection hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_collection(session: "Session") -> Optional[object]:
- """Perform the collection phase for the given session.
-
- Stops at first non-None result, see :ref:`firstresult`.
- The return value is not used, but only stops further processing.
-
- The default collection phase is this (see individual hooks for full details):
-
- 1. Starting from ``session`` as the initial collector:
-
- 1. ``pytest_collectstart(collector)``
- 2. ``report = pytest_make_collect_report(collector)``
- 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred
- 4. For each collected node:
-
- 1. If an item, ``pytest_itemcollected(item)``
- 2. If a collector, recurse into it.
-
- 5. ``pytest_collectreport(report)``
-
- 2. ``pytest_collection_modifyitems(session, config, items)``
-
- 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times)
-
- 3. ``pytest_collection_finish(session)``
- 4. Set ``session.items`` to the list of collected items
- 5. Set ``session.testscollected`` to the number of collected items
-
- You can implement this hook to only perform some action before collection,
- for example the terminal plugin uses it to start displaying the collection
- counter (and returns `None`).
-
- :param pytest.Session session: The pytest session object.
- """
-
-
-def pytest_collection_modifyitems(
- session: "Session", config: "Config", items: List["Item"]
-) -> None:
- """Called after collection has been performed. May filter or re-order
- the items in-place.
-
- :param pytest.Session session: The pytest session object.
- :param pytest.Config config: The pytest config object.
- :param List[pytest.Item] items: List of item objects.
- """
-
-
-def pytest_collection_finish(session: "Session") -> None:
- """Called after collection has been performed and modified.
-
- :param pytest.Session session: The pytest session object.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_ignore_collect(
- collection_path: Path, path: "LEGACY_PATH", config: "Config"
-) -> Optional[bool]:
- """Return True to prevent considering this path for collection.
-
- This hook is consulted for all files and directories prior to calling
- more specific hooks.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- :param pathlib.Path collection_path: The path to analyze.
- :param LEGACY_PATH path: The path to analyze (deprecated).
- :param pytest.Config config: The pytest config object.
-
- .. versionchanged:: 7.0.0
- The ``collection_path`` parameter was added as a :class:`pathlib.Path`
- equivalent of the ``path`` parameter. The ``path`` parameter
- has been deprecated.
- """
-
-
-def pytest_collect_file(
- file_path: Path, path: "LEGACY_PATH", parent: "Collector"
-) -> "Optional[Collector]":
- """Create a Collector for the given path, or None if not relevant.
-
- The new node needs to have the specified ``parent`` as a parent.
-
- :param pathlib.Path file_path: The path to analyze.
- :param LEGACY_PATH path: The path to collect (deprecated).
-
- .. versionchanged:: 7.0.0
- The ``file_path`` parameter was added as a :class:`pathlib.Path`
- equivalent of the ``path`` parameter. The ``path`` parameter
- has been deprecated.
- """
-
-
-# logging hooks for collection
-
-
-def pytest_collectstart(collector: "Collector") -> None:
- """Collector starts collecting."""
-
-
-def pytest_itemcollected(item: "Item") -> None:
- """We just collected a test item."""
-
-
-def pytest_collectreport(report: "CollectReport") -> None:
- """Collector finished collecting."""
-
-
-def pytest_deselected(items: Sequence["Item"]) -> None:
- """Called for deselected test items, e.g. by keyword.
-
- May be called multiple times.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]":
- """Perform :func:`collector.collect() <pytest.Collector.collect>` and return
- a :class:`~pytest.CollectReport`.
-
- Stops at first non-None result, see :ref:`firstresult`.
- """
-
-
-# -------------------------------------------------------------------------
-# Python test function related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_pycollect_makemodule(
- module_path: Path, path: "LEGACY_PATH", parent
-) -> Optional["Module"]:
- """Return a Module collector or None for the given path.
-
- This hook will be called for each matching test module path.
- The pytest_collect_file hook needs to be used if you want to
- create test modules for files that do not match as a test module.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- :param pathlib.Path module_path: The path of the module to collect.
- :param LEGACY_PATH path: The path of the module to collect (deprecated).
-
- .. versionchanged:: 7.0.0
- The ``module_path`` parameter was added as a :class:`pathlib.Path`
- equivalent of the ``path`` parameter.
-
- The ``path`` parameter has been deprecated.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_pycollect_makeitem(
- collector: Union["Module", "Class"], name: str, obj: object
-) -> Union[None, "Item", "Collector", List[Union["Item", "Collector"]]]:
- """Return a custom item/collector for a Python object in a module, or None.
-
- Stops at first non-None result, see :ref:`firstresult`.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
- """Call underlying test function.
-
- Stops at first non-None result, see :ref:`firstresult`.
- """
-
-
-def pytest_generate_tests(metafunc: "Metafunc") -> None:
- """Generate (multiple) parametrized calls to a test function."""
-
-
-@hookspec(firstresult=True)
-def pytest_make_parametrize_id(
- config: "Config", val: object, argname: str
-) -> Optional[str]:
- """Return a user-friendly string representation of the given ``val``
- that will be used by @pytest.mark.parametrize calls, or None if the hook
- doesn't know about ``val``.
-
- The parameter name is available as ``argname``, if required.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- :param pytest.Config config: The pytest config object.
- :param val: The parametrized value.
- :param str argname: The automatic parameter name produced by pytest.
- """
-
-
-# -------------------------------------------------------------------------
-# runtest related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_runtestloop(session: "Session") -> Optional[object]:
- """Perform the main runtest loop (after collection finished).
-
- The default hook implementation performs the runtest protocol for all items
- collected in the session (``session.items``), unless the collection failed
- or the ``collectonly`` pytest option is set.
-
- If at any point :py:func:`pytest.exit` is called, the loop is
- terminated immediately.
-
- If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the
- loop is terminated after the runtest protocol for the current item is finished.
-
- :param pytest.Session session: The pytest session object.
-
- Stops at first non-None result, see :ref:`firstresult`.
- The return value is not used, but only stops further processing.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_runtest_protocol(
- item: "Item", nextitem: "Optional[Item]"
-) -> Optional[object]:
- """Perform the runtest protocol for a single test item.
-
- The default runtest protocol is this (see individual hooks for full details):
-
- - ``pytest_runtest_logstart(nodeid, location)``
-
- - Setup phase:
- - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``)
- - ``report = pytest_runtest_makereport(item, call)``
- - ``pytest_runtest_logreport(report)``
- - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
-
- - Call phase, if the setup passed and the ``setuponly`` pytest option is not set:
- - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``)
- - ``report = pytest_runtest_makereport(item, call)``
- - ``pytest_runtest_logreport(report)``
- - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
-
- - Teardown phase:
- - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``)
- - ``report = pytest_runtest_makereport(item, call)``
- - ``pytest_runtest_logreport(report)``
- - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
-
- - ``pytest_runtest_logfinish(nodeid, location)``
-
- :param item: Test item for which the runtest protocol is performed.
- :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend).
-
- Stops at first non-None result, see :ref:`firstresult`.
- The return value is not used, but only stops further processing.
- """
-
-
-def pytest_runtest_logstart(
- nodeid: str, location: Tuple[str, Optional[int], str]
-) -> None:
- """Called at the start of running the runtest protocol for a single item.
-
- See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
-
- :param str nodeid: Full node ID of the item.
- :param location: A tuple of ``(filename, lineno, testname)``.
- """
-
-
-def pytest_runtest_logfinish(
- nodeid: str, location: Tuple[str, Optional[int], str]
-) -> None:
- """Called at the end of running the runtest protocol for a single item.
-
- See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
-
- :param str nodeid: Full node ID of the item.
- :param location: A tuple of ``(filename, lineno, testname)``.
- """
-
-
-def pytest_runtest_setup(item: "Item") -> None:
- """Called to perform the setup phase for a test item.
-
- The default implementation runs ``setup()`` on ``item`` and all of its
- parents (which haven't been setup yet). This includes obtaining the
- values of fixtures required by the item (which haven't been obtained
- yet).
- """
-
-
-def pytest_runtest_call(item: "Item") -> None:
- """Called to run the test for test item (the call phase).
-
- The default implementation calls ``item.runtest()``.
- """
-
-
-def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None:
- """Called to perform the teardown phase for a test item.
-
- The default implementation runs the finalizers and calls ``teardown()``
- on ``item`` and all of its parents (which need to be torn down). This
- includes running the teardown phase of fixtures required by the item (if
- they go out of scope).
-
- :param nextitem:
- The scheduled-to-be-next test item (None if no further test item is
- scheduled). This argument is used to perform exact teardowns, i.e.
- calling just enough finalizers so that nextitem only needs to call
- setup functions.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_runtest_makereport(
- item: "Item", call: "CallInfo[None]"
-) -> Optional["TestReport"]:
- """Called to create a :class:`~pytest.TestReport` for each of
- the setup, call and teardown runtest phases of a test item.
-
- See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
-
- :param call: The :class:`~pytest.CallInfo` for the phase.
-
- Stops at first non-None result, see :ref:`firstresult`.
- """
-
-
-def pytest_runtest_logreport(report: "TestReport") -> None:
- """Process the :class:`~pytest.TestReport` produced for each
- of the setup, call and teardown runtest phases of an item.
-
- See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_report_to_serializable(
- config: "Config",
- report: Union["CollectReport", "TestReport"],
-) -> Optional[Dict[str, Any]]:
- """Serialize the given report object into a data structure suitable for
- sending over the wire, e.g. converted to JSON."""
-
-
-@hookspec(firstresult=True)
-def pytest_report_from_serializable(
- config: "Config",
- data: Dict[str, Any],
-) -> Optional[Union["CollectReport", "TestReport"]]:
- """Restore a report object previously serialized with
- :hook:`pytest_report_to_serializable`."""
-
-
-# -------------------------------------------------------------------------
-# Fixture related hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_fixture_setup(
- fixturedef: "FixtureDef[Any]", request: "SubRequest"
-) -> Optional[object]:
- """Perform fixture setup execution.
-
- :returns: The return value of the call to the fixture function.
-
- Stops at first non-None result, see :ref:`firstresult`.
-
- .. note::
- If the fixture function returns None, other implementations of
- this hook function will continue to be called, according to the
- behavior of the :ref:`firstresult` option.
- """
-
-
-def pytest_fixture_post_finalizer(
- fixturedef: "FixtureDef[Any]", request: "SubRequest"
-) -> None:
- """Called after fixture teardown, but before the cache is cleared, so
- the fixture result ``fixturedef.cached_result`` is still available (not
- ``None``)."""
-
-
-# -------------------------------------------------------------------------
-# test session related hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_sessionstart(session: "Session") -> None:
- """Called after the ``Session`` object has been created and before performing collection
- and entering the run test loop.
-
- :param pytest.Session session: The pytest session object.
- """
-
-
-def pytest_sessionfinish(
- session: "Session",
- exitstatus: Union[int, "ExitCode"],
-) -> None:
- """Called after whole test run finished, right before returning the exit status to the system.
-
- :param pytest.Session session: The pytest session object.
- :param int exitstatus: The status which pytest will return to the system.
- """
-
-
-def pytest_unconfigure(config: "Config") -> None:
- """Called before test process is exited.
-
- :param pytest.Config config: The pytest config object.
- """
-
-
-# -------------------------------------------------------------------------
-# hooks for customizing the assert methods
-# -------------------------------------------------------------------------
-
-
-def pytest_assertrepr_compare(
- config: "Config", op: str, left: object, right: object
-) -> Optional[List[str]]:
- """Return explanation for comparisons in failing assert expressions.
-
- Return None for no custom explanation, otherwise return a list
- of strings. The strings will be joined by newlines but any newlines
- *in* a string will be escaped. Note that all but the first line will
- be indented slightly, the intention is for the first line to be a summary.
-
- :param pytest.Config config: The pytest config object.
- """
-
-
-def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None:
- """Called whenever an assertion passes.
-
- .. versionadded:: 5.0
-
- Use this hook to do some processing after a passing assertion.
- The original assertion information is available in the `orig` string
- and the pytest introspected assertion information is available in the
- `expl` string.
-
- This hook must be explicitly enabled by the ``enable_assertion_pass_hook``
- ini-file option:
-
- .. code-block:: ini
-
- [pytest]
- enable_assertion_pass_hook=true
-
- You need to **clean the .pyc** files in your project directory and interpreter libraries
- when enabling this option, as assertions need to be re-written.
-
- :param pytest.Item item: pytest item object of current test.
- :param int lineno: Line number of the assert statement.
- :param str orig: String with the original assertion.
- :param str expl: String with the assert explanation.
- """
-
-
-# -------------------------------------------------------------------------
-# Hooks for influencing reporting (invoked from _pytest_terminal).
-# -------------------------------------------------------------------------
-
-
-def pytest_report_header(
- config: "Config", start_path: Path, startdir: "LEGACY_PATH"
-) -> Union[str, List[str]]:
- """Return a string or list of strings to be displayed as header info for terminal reporting.
-
- :param pytest.Config config: The pytest config object.
- :param Path start_path: The starting dir.
- :param LEGACY_PATH startdir: The starting dir (deprecated).
-
- .. note::
-
- Lines returned by a plugin are displayed before those of plugins which
- ran before it.
- If you want to have your line(s) displayed first, use
- :ref:`trylast=True <plugin-hookorder>`.
-
- .. note::
-
- This function should be implemented only in plugins or ``conftest.py``
- files situated at the tests root directory due to how pytest
- :ref:`discovers plugins during startup <pluginorder>`.
-
- .. versionchanged:: 7.0.0
- The ``start_path`` parameter was added as a :class:`pathlib.Path`
- equivalent of the ``startdir`` parameter. The ``startdir`` parameter
- has been deprecated.
- """
-
-
-def pytest_report_collectionfinish(
- config: "Config",
- start_path: Path,
- startdir: "LEGACY_PATH",
- items: Sequence["Item"],
-) -> Union[str, List[str]]:
- """Return a string or list of strings to be displayed after collection
- has finished successfully.
-
- These strings will be displayed after the standard "collected X items" message.
-
- .. versionadded:: 3.2
-
- :param pytest.Config config: The pytest config object.
- :param Path start_path: The starting dir.
- :param LEGACY_PATH startdir: The starting dir (deprecated).
- :param items: List of pytest items that are going to be executed; this list should not be modified.
-
- .. note::
-
- Lines returned by a plugin are displayed before those of plugins which
- ran before it.
- If you want to have your line(s) displayed first, use
- :ref:`trylast=True <plugin-hookorder>`.
-
- .. versionchanged:: 7.0.0
- The ``start_path`` parameter was added as a :class:`pathlib.Path`
- equivalent of the ``startdir`` parameter. The ``startdir`` parameter
- has been deprecated.
- """
-
-
-@hookspec(firstresult=True)
-def pytest_report_teststatus(
- report: Union["CollectReport", "TestReport"], config: "Config"
-) -> Tuple[str, str, Union[str, Mapping[str, bool]]]:
- """Return result-category, shortletter and verbose word for status
- reporting.
-
- The result-category is a category in which to count the result, for
- example "passed", "skipped", "error" or the empty string.
-
- The shortletter is shown as testing progresses, for example ".", "s",
- "E" or the empty string.
-
- The verbose word is shown as testing progresses in verbose mode, for
- example "PASSED", "SKIPPED", "ERROR" or the empty string.
-
- pytest may style these implicitly according to the report outcome.
- To provide explicit styling, return a tuple for the verbose word,
- for example ``"rerun", "R", ("RERUN", {"yellow": True})``.
-
- :param report: The report object whose status is to be returned.
- :param config: The pytest config object.
-
- Stops at first non-None result, see :ref:`firstresult`.
- """
-
-
-def pytest_terminal_summary(
- terminalreporter: "TerminalReporter",
- exitstatus: "ExitCode",
- config: "Config",
-) -> None:
- """Add a section to terminal summary reporting.
-
- :param _pytest.terminal.TerminalReporter terminalreporter: The internal terminal reporter object.
- :param int exitstatus: The exit status that will be reported back to the OS.
- :param pytest.Config config: The pytest config object.
-
- .. versionadded:: 4.2
- The ``config`` parameter.
- """
-
-
-@hookspec(historic=True)
-def pytest_warning_recorded(
- warning_message: "warnings.WarningMessage",
- when: "Literal['config', 'collect', 'runtest']",
- nodeid: str,
- location: Optional[Tuple[str, int, str]],
-) -> None:
- """Process a warning captured by the internal pytest warnings plugin.
-
- :param warnings.WarningMessage warning_message:
- The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
- the same attributes as the parameters of :py:func:`warnings.showwarning`.
-
- :param str when:
- Indicates when the warning was captured. Possible values:
-
- * ``"config"``: during pytest configuration/initialization stage.
- * ``"collect"``: during test collection.
- * ``"runtest"``: during test execution.
-
- :param str nodeid:
- Full id of the item.
-
- :param tuple|None location:
- When available, holds information about the execution context of the captured
- warning (filename, linenumber, function). ``function`` evaluates to <module>
- when the execution context is at the module level.
-
- .. versionadded:: 6.0
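-
-    Example (an illustrative sketch; ``collected_warnings`` is a hypothetical
-    module-level list in a conftest.py)::
-
-        collected_warnings = []
-
-        def pytest_warning_recorded(warning_message, when, nodeid, location):
-            collected_warnings.append((when, nodeid, str(warning_message.message)))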
- """
-
-
-# -------------------------------------------------------------------------
-# Hooks for influencing skipping
-# -------------------------------------------------------------------------
-
-
-def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]:
- """Called when constructing the globals dictionary used for
- evaluating string conditions in xfail/skipif markers.
-
-    This is useful when the condition for a marker requires
-    objects that are expensive or impossible to obtain during
-    collection time, which is when normal boolean conditions
-    are evaluated.
-
- .. versionadded:: 6.2
-
- :param pytest.Config config: The pytest config object.
- :returns: A dictionary of additional globals to add.
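-
-    Example (an illustrative sketch; ``get_db_version`` is a hypothetical
-    helper)::
-
-        def pytest_markeval_namespace(config):
-            return {"db_version": get_db_version(config)}
-
-    A test could then use
-    ``@pytest.mark.skipif("db_version < (2, 0)", reason="needs db >= 2.0")``.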
- """
-
-
-# -------------------------------------------------------------------------
-# error handling and internal debugging hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_internalerror(
- excrepr: "ExceptionRepr",
- excinfo: "ExceptionInfo[BaseException]",
-) -> Optional[bool]:
- """Called for internal errors.
-
- Return True to suppress the fallback handling of printing an
- INTERNALERROR message directly to sys.stderr.
- """
-
-
-def pytest_keyboard_interrupt(
- excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]",
-) -> None:
- """Called for keyboard interrupt."""
-
-
-def pytest_exception_interact(
- node: Union["Item", "Collector"],
- call: "CallInfo[Any]",
- report: Union["CollectReport", "TestReport"],
-) -> None:
- """Called when an exception was raised which can potentially be
- interactively handled.
-
- May be called during collection (see :hook:`pytest_make_collect_report`),
- in which case ``report`` is a :class:`CollectReport`.
-
- May be called during runtest of an item (see :hook:`pytest_runtest_protocol`),
- in which case ``report`` is a :class:`TestReport`.
-
- This hook is not called if the exception that was raised is an internal
- exception like ``skip.Exception``.
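-
-    Example (an illustrative sketch; ``failed_node_ids`` is a hypothetical
-    module-level list in a conftest.py)::
-
-        failed_node_ids = []
-
-        def pytest_exception_interact(node, call, report):
-            if report.failed:
-                failed_node_ids.append(node.nodeid)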
- """
-
-
-def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
- """Called upon pdb.set_trace().
-
- Can be used by plugins to take special action just before the python
- debugger enters interactive mode.
-
- :param pytest.Config config: The pytest config object.
- :param pdb.Pdb pdb: The Pdb instance.
- """
-
-
-def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
- """Called when leaving pdb (e.g. with continue after pdb.set_trace()).
-
- Can be used by plugins to take special action just after the python
- debugger leaves interactive mode.
-
- :param pytest.Config config: The pytest config object.
- :param pdb.Pdb pdb: The Pdb instance.
- """
diff --git a/contrib/python/pytest/py3/_pytest/junitxml.py b/contrib/python/pytest/py3/_pytest/junitxml.py
deleted file mode 100644
index 1b9e3bfeca..0000000000
--- a/contrib/python/pytest/py3/_pytest/junitxml.py
+++ /dev/null
@@ -1,696 +0,0 @@
-"""Report test results in JUnit-XML format, for use with Jenkins and build
-integration servers.
-
-Based on initial code from Ross Lawley.
-
-Output conforms to
-https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
-"""
-import functools
-import os
-import platform
-import re
-import xml.etree.ElementTree as ET
-from datetime import datetime
-from typing import Callable
-from typing import Dict
-from typing import List
-from typing import Match
-from typing import Optional
-from typing import Tuple
-from typing import Union
-
-import pytest
-from _pytest import nodes
-from _pytest import timing
-from _pytest._code.code import ExceptionRepr
-from _pytest._code.code import ReprFileLocation
-from _pytest.config import Config
-from _pytest.config import filename_arg
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
-from _pytest.reports import TestReport
-from _pytest.stash import StashKey
-from _pytest.terminal import TerminalReporter
-
-
-xml_key = StashKey["LogXML"]()
-
-
-def bin_xml_escape(arg: object) -> str:
- r"""Visually escape invalid XML characters.
-
- For example, transforms
- 'hello\aworld\b'
- into
- 'hello#x07world#x08'
-    Note that the #xAB forms are *not* XML escapes - they lack the leading ampersand of &#xAB.
- The idea is to escape visually for the user rather than for XML itself.
- """
-
- def repl(matchobj: Match[str]) -> str:
- i = ord(matchobj.group())
- if i <= 0xFF:
- return "#x%02X" % i
- else:
- return "#x%04X" % i
-
- # The spec range of valid chars is:
- # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
- # For an unknown(?) reason, we disallow #x7F (DEL) as well.
- illegal_xml_re = (
- "[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]"
- )
- return re.sub(illegal_xml_re, repl, str(arg))
-
-
-def merge_family(left, right) -> None:
- result = {}
- for kl, vl in left.items():
- for kr, vr in right.items():
- if not isinstance(vl, list):
- raise TypeError(type(vl))
- result[kl] = vl + vr
- left.update(result)
-
-
-families = {}
-families["_base"] = {"testcase": ["classname", "name"]}
-families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
-
-# xUnit 1.x inherits legacy attributes.
-families["xunit1"] = families["_base"].copy()
-merge_family(families["xunit1"], families["_base_legacy"])
-
-# xUnit 2.x uses strict base attributes.
-families["xunit2"] = families["_base"]
-
-
-class _NodeReporter:
- def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None:
- self.id = nodeid
- self.xml = xml
- self.add_stats = self.xml.add_stats
- self.family = self.xml.family
- self.duration = 0.0
- self.properties: List[Tuple[str, str]] = []
- self.nodes: List[ET.Element] = []
- self.attrs: Dict[str, str] = {}
-
- def append(self, node: ET.Element) -> None:
- self.xml.add_stats(node.tag)
- self.nodes.append(node)
-
- def add_property(self, name: str, value: object) -> None:
- self.properties.append((str(name), bin_xml_escape(value)))
-
- def add_attribute(self, name: str, value: object) -> None:
- self.attrs[str(name)] = bin_xml_escape(value)
-
- def make_properties_node(self) -> Optional[ET.Element]:
- """Return a Junit node containing custom properties, if any."""
- if self.properties:
- properties = ET.Element("properties")
- for name, value in self.properties:
- properties.append(ET.Element("property", name=name, value=value))
- return properties
- return None
-
- def record_testreport(self, testreport: TestReport) -> None:
- names = mangle_test_address(testreport.nodeid)
- existing_attrs = self.attrs
- classnames = names[:-1]
- if self.xml.prefix:
- classnames.insert(0, self.xml.prefix)
- attrs: Dict[str, str] = {
- "classname": ".".join(classnames),
- "name": bin_xml_escape(names[-1]),
- "file": testreport.location[0],
- }
- if testreport.location[1] is not None:
- attrs["line"] = str(testreport.location[1])
- if hasattr(testreport, "url"):
- attrs["url"] = testreport.url
- self.attrs = attrs
- self.attrs.update(existing_attrs) # Restore any user-defined attributes.
-
- # Preserve legacy testcase behavior.
- if self.family == "xunit1":
- return
-
- # Filter out attributes not permitted by this test family.
-        # This also drops custom attributes, since they are not valid here.
- temp_attrs = {}
- for key in self.attrs.keys():
- if key in families[self.family]["testcase"]:
- temp_attrs[key] = self.attrs[key]
- self.attrs = temp_attrs
-
- def to_xml(self) -> ET.Element:
- testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration)
- properties = self.make_properties_node()
- if properties is not None:
- testcase.append(properties)
- testcase.extend(self.nodes)
- return testcase
-
- def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None:
- node = ET.Element(tag, message=message)
- node.text = bin_xml_escape(data)
- self.append(node)
-
- def write_captured_output(self, report: TestReport) -> None:
- if not self.xml.log_passing_tests and report.passed:
- return
-
- content_out = report.capstdout
- content_log = report.caplog
- content_err = report.capstderr
- if self.xml.logging == "no":
- return
- content_all = ""
- if self.xml.logging in ["log", "all"]:
- content_all = self._prepare_content(content_log, " Captured Log ")
- if self.xml.logging in ["system-out", "out-err", "all"]:
- content_all += self._prepare_content(content_out, " Captured Out ")
- self._write_content(report, content_all, "system-out")
- content_all = ""
- if self.xml.logging in ["system-err", "out-err", "all"]:
- content_all += self._prepare_content(content_err, " Captured Err ")
- self._write_content(report, content_all, "system-err")
- content_all = ""
- if content_all:
- self._write_content(report, content_all, "system-out")
-
- def _prepare_content(self, content: str, header: str) -> str:
- return "\n".join([header.center(80, "-"), content, ""])
-
- def _write_content(self, report: TestReport, content: str, jheader: str) -> None:
- tag = ET.Element(jheader)
- tag.text = bin_xml_escape(content)
- self.append(tag)
-
- def append_pass(self, report: TestReport) -> None:
- self.add_stats("passed")
-
- def append_failure(self, report: TestReport) -> None:
- # msg = str(report.longrepr.reprtraceback.extraline)
- if hasattr(report, "wasxfail"):
- self._add_simple("skipped", "xfail-marked test passes unexpectedly")
- else:
- assert report.longrepr is not None
- reprcrash: Optional[ReprFileLocation] = getattr(
- report.longrepr, "reprcrash", None
- )
- if reprcrash is not None:
- message = reprcrash.message
- else:
- message = str(report.longrepr)
- message = bin_xml_escape(message)
- self._add_simple("failure", message, str(report.longrepr))
-
- def append_collect_error(self, report: TestReport) -> None:
- # msg = str(report.longrepr.reprtraceback.extraline)
- assert report.longrepr is not None
- self._add_simple("error", "collection failure", str(report.longrepr))
-
- def append_collect_skipped(self, report: TestReport) -> None:
- self._add_simple("skipped", "collection skipped", str(report.longrepr))
-
- def append_error(self, report: TestReport) -> None:
- assert report.longrepr is not None
- reprcrash: Optional[ReprFileLocation] = getattr(
- report.longrepr, "reprcrash", None
- )
- if reprcrash is not None:
- reason = reprcrash.message
- else:
- reason = str(report.longrepr)
-
- if report.when == "teardown":
- msg = f'failed on teardown with "{reason}"'
- else:
- msg = f'failed on setup with "{reason}"'
- self._add_simple("error", msg, str(report.longrepr))
-
- def append_skipped(self, report: TestReport) -> None:
- if hasattr(report, "wasxfail"):
- xfailreason = report.wasxfail
- if xfailreason.startswith("reason: "):
- xfailreason = xfailreason[8:]
- xfailreason = bin_xml_escape(xfailreason)
- skipped = ET.Element("skipped", type="pytest.xfail", message=xfailreason)
- self.append(skipped)
- else:
- assert isinstance(report.longrepr, tuple)
- filename, lineno, skipreason = report.longrepr
- if skipreason.startswith("Skipped: "):
- skipreason = skipreason[9:]
- details = f"{filename}:{lineno}: {skipreason}"
-
- skipped = ET.Element("skipped", type="pytest.skip", message=skipreason)
- skipped.text = bin_xml_escape(details)
- self.append(skipped)
- self.write_captured_output(report)
-
- def finalize(self) -> None:
- data = self.to_xml()
- self.__dict__.clear()
- # Type ignored because mypy doesn't like overriding a method.
- # Also the return value doesn't match...
- self.to_xml = lambda: data # type: ignore[assignment]
-
-
-def _warn_incompatibility_with_xunit2(
- request: FixtureRequest, fixture_name: str
-) -> None:
- """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
- from _pytest.warning_types import PytestWarning
-
- xml = request.config.stash.get(xml_key, None)
- if xml is not None and xml.family not in ("xunit1", "legacy"):
- request.node.warn(
- PytestWarning(
- "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
- fixture_name=fixture_name, family=xml.family
- )
- )
- )
-
-
-@pytest.fixture
-def record_property(request: FixtureRequest) -> Callable[[str, object], None]:
- """Add extra properties to the calling test.
-
- User properties become part of the test report and are available to the
- configured reporters, like JUnit XML.
-
- The fixture is callable with ``name, value``. The value is automatically
- XML-encoded.
-
- Example::
-
- def test_function(record_property):
- record_property("example_key", 1)
- """
- _warn_incompatibility_with_xunit2(request, "record_property")
-
- def append_property(name: str, value: object) -> None:
- request.node.user_properties.append((name, value))
-
- return append_property
-
-
-@pytest.fixture
-def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]:
- """Add extra xml attributes to the tag for the calling test.
-
- The fixture is callable with ``name, value``. The value is
- automatically XML-encoded.
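-
-    Example (illustrative)::
-
-        def test_function(record_xml_attribute):
-            record_xml_attribute("assertions", "REQ-1234")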
- """
- from _pytest.warning_types import PytestExperimentalApiWarning
-
- request.node.warn(
- PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
- )
-
- _warn_incompatibility_with_xunit2(request, "record_xml_attribute")
-
- # Declare noop
- def add_attr_noop(name: str, value: object) -> None:
- pass
-
- attr_func = add_attr_noop
-
- xml = request.config.stash.get(xml_key, None)
- if xml is not None:
- node_reporter = xml.node_reporter(request.node.nodeid)
- attr_func = node_reporter.add_attribute
-
- return attr_func
-
-
-def _check_record_param_type(param: str, v: str) -> None:
- """Used by record_testsuite_property to check that the given parameter name is of the proper
- type."""
- __tracebackhide__ = True
- if not isinstance(v, str):
- msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable]
- raise TypeError(msg.format(param=param, g=type(v).__name__))
-
-
-@pytest.fixture(scope="session")
-def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
- """Record a new ``<property>`` tag as child of the root ``<testsuite>``.
-
-    This is suitable for writing global information regarding the entire test
-    suite, and is compatible with the ``xunit2`` JUnit family.
-
- This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
-
- .. code-block:: python
-
- def test_foo(record_testsuite_property):
- record_testsuite_property("ARCH", "PPC")
- record_testsuite_property("STORAGE_TYPE", "CEPH")
-
- ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
-
- .. warning::
-
- Currently this fixture **does not work** with the
- `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See
- :issue:`7767` for details.
- """
-
- __tracebackhide__ = True
-
- def record_func(name: str, value: object) -> None:
- """No-op function in case --junitxml was not passed in the command-line."""
- __tracebackhide__ = True
- _check_record_param_type("name", name)
-
- xml = request.config.stash.get(xml_key, None)
- if xml is not None:
- record_func = xml.add_global_property # noqa
- return record_func
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("terminal reporting")
- group.addoption(
- "--junitxml",
- "--junit-xml",
- action="store",
- dest="xmlpath",
- metavar="path",
- type=functools.partial(filename_arg, optname="--junitxml"),
- default=None,
- help="create junit-xml style report file at given path.",
- )
- group.addoption(
- "--junitprefix",
- "--junit-prefix",
- action="store",
- metavar="str",
- default=None,
- help="prepend prefix to classnames in junit-xml output",
- )
- parser.addini(
- "junit_suite_name", "Test suite name for JUnit report", default="pytest"
- )
- parser.addini(
- "junit_logging",
- "Write captured log messages to JUnit report: "
- "one of no|log|system-out|system-err|out-err|all",
- default="no",
- )
- parser.addini(
- "junit_log_passing_tests",
- "Capture log information for passing tests to JUnit report: ",
- type="bool",
- default=True,
- )
- parser.addini(
- "junit_duration_report",
- "Duration time to report: one of total|call",
- default="total",
- ) # choices=['total', 'call'])
- parser.addini(
- "junit_family",
- "Emit XML for schema: one of legacy|xunit1|xunit2",
- default="xunit2",
- )
-
-
-def pytest_configure(config: Config) -> None:
- xmlpath = config.option.xmlpath
- # Prevent opening xmllog on worker nodes (xdist).
- if xmlpath and not hasattr(config, "workerinput"):
- junit_family = config.getini("junit_family")
- config.stash[xml_key] = LogXML(
- xmlpath,
- config.option.junitprefix,
- config.getini("junit_suite_name"),
- config.getini("junit_logging"),
- config.getini("junit_duration_report"),
- junit_family,
- config.getini("junit_log_passing_tests"),
- )
- config.pluginmanager.register(config.stash[xml_key])
-
-
-def pytest_unconfigure(config: Config) -> None:
- xml = config.stash.get(xml_key, None)
- if xml:
- del config.stash[xml_key]
- config.pluginmanager.unregister(xml)
-
-
-def mangle_test_address(address: str) -> List[str]:
- path, possible_open_bracket, params = address.partition("[")
- names = path.split("::")
- # Convert file path to dotted path.
- names[0] = names[0].replace(nodes.SEP, ".")
- names[0] = re.sub(r"\.py$", "", names[0])
- # Put any params back.
- names[-1] += possible_open_bracket + params
- return names
-
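-# Illustrative sketch of the transformation above, for a hypothetical node id:
-#   mangle_test_address("tests/test_mod.py::TestCls::test_fn[param]")
-#   -> ["tests.test_mod", "TestCls", "test_fn[param]"]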
-
-class LogXML:
- def __init__(
- self,
- logfile,
- prefix: Optional[str],
- suite_name: str = "pytest",
- logging: str = "no",
- report_duration: str = "total",
- family="xunit1",
- log_passing_tests: bool = True,
- ) -> None:
- logfile = os.path.expanduser(os.path.expandvars(logfile))
- self.logfile = os.path.normpath(os.path.abspath(logfile))
- self.prefix = prefix
- self.suite_name = suite_name
- self.logging = logging
- self.log_passing_tests = log_passing_tests
- self.report_duration = report_duration
- self.family = family
- self.stats: Dict[str, int] = dict.fromkeys(
- ["error", "passed", "failure", "skipped"], 0
- )
- self.node_reporters: Dict[
- Tuple[Union[str, TestReport], object], _NodeReporter
- ] = {}
- self.node_reporters_ordered: List[_NodeReporter] = []
- self.global_properties: List[Tuple[str, str]] = []
-
- # List of reports that failed on call but teardown is pending.
- self.open_reports: List[TestReport] = []
- self.cnt_double_fail_tests = 0
-
- # Replaces convenience family with real family.
- if self.family == "legacy":
- self.family = "xunit1"
-
- def finalize(self, report: TestReport) -> None:
- nodeid = getattr(report, "nodeid", report)
- # Local hack to handle xdist report order.
- workernode = getattr(report, "node", None)
- reporter = self.node_reporters.pop((nodeid, workernode))
- if reporter is not None:
- reporter.finalize()
-
- def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter:
- nodeid: Union[str, TestReport] = getattr(report, "nodeid", report)
- # Local hack to handle xdist report order.
- workernode = getattr(report, "node", None)
-
- key = nodeid, workernode
-
- if key in self.node_reporters:
- # TODO: breaks for --dist=each
- return self.node_reporters[key]
-
- reporter = _NodeReporter(nodeid, self)
-
- self.node_reporters[key] = reporter
- self.node_reporters_ordered.append(reporter)
-
- return reporter
-
- def add_stats(self, key: str) -> None:
- if key in self.stats:
- self.stats[key] += 1
-
- def _opentestcase(self, report: TestReport) -> _NodeReporter:
- reporter = self.node_reporter(report)
- reporter.record_testreport(report)
- return reporter
-
- def pytest_runtest_logreport(self, report: TestReport) -> None:
- """Handle a setup/call/teardown report, generating the appropriate
- XML tags as necessary.
-
- Note: due to plugins like xdist, this hook may be called in interlaced
- order with reports from other nodes. For example:
-
- Usual call order:
- -> setup node1
- -> call node1
- -> teardown node1
- -> setup node2
- -> call node2
- -> teardown node2
-
- Possible call order in xdist:
- -> setup node1
- -> call node1
- -> setup node2
- -> call node2
- -> teardown node2
- -> teardown node1
- """
- close_report = None
- if report.passed:
- if report.when == "call": # ignore setup/teardown
- reporter = self._opentestcase(report)
- reporter.append_pass(report)
- elif report.failed:
- if report.when == "teardown":
- # The following vars are needed when xdist plugin is used.
- report_wid = getattr(report, "worker_id", None)
- report_ii = getattr(report, "item_index", None)
- close_report = next(
- (
- rep
- for rep in self.open_reports
- if (
- rep.nodeid == report.nodeid
- and getattr(rep, "item_index", None) == report_ii
- and getattr(rep, "worker_id", None) == report_wid
- )
- ),
- None,
- )
- if close_report:
-                    # We need to open a new testcase in case we have a failure
-                    # in call and an error in teardown, in order to follow the
-                    # junit schema.
- self.finalize(close_report)
- self.cnt_double_fail_tests += 1
- reporter = self._opentestcase(report)
- if report.when == "call":
- reporter.append_failure(report)
- self.open_reports.append(report)
- if not self.log_passing_tests:
- reporter.write_captured_output(report)
- else:
- reporter.append_error(report)
- elif report.skipped:
- reporter = self._opentestcase(report)
- reporter.append_skipped(report)
- self.update_testcase_duration(report)
- if report.when == "teardown":
- reporter = self._opentestcase(report)
- reporter.write_captured_output(report)
-
- for propname, propvalue in report.user_properties:
- reporter.add_property(propname, str(propvalue))
-
- self.finalize(report)
- report_wid = getattr(report, "worker_id", None)
- report_ii = getattr(report, "item_index", None)
- close_report = next(
- (
- rep
- for rep in self.open_reports
- if (
- rep.nodeid == report.nodeid
- and getattr(rep, "item_index", None) == report_ii
- and getattr(rep, "worker_id", None) == report_wid
- )
- ),
- None,
- )
- if close_report:
- self.open_reports.remove(close_report)
-
- def update_testcase_duration(self, report: TestReport) -> None:
- """Accumulate total duration for nodeid from given report and update
- the Junit.testcase with the new total if already created."""
- if self.report_duration == "total" or report.when == self.report_duration:
- reporter = self.node_reporter(report)
- reporter.duration += getattr(report, "duration", 0.0)
-
- def pytest_collectreport(self, report: TestReport) -> None:
- if not report.passed:
- reporter = self._opentestcase(report)
- if report.failed:
- reporter.append_collect_error(report)
- else:
- reporter.append_collect_skipped(report)
-
- def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
- reporter = self.node_reporter("internal")
- reporter.attrs.update(classname="pytest", name="internal")
- reporter._add_simple("error", "internal error", str(excrepr))
-
- def pytest_sessionstart(self) -> None:
- self.suite_start_time = timing.time()
-
- def pytest_sessionfinish(self) -> None:
- dirname = os.path.dirname(os.path.abspath(self.logfile))
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
-
- with open(self.logfile, "w", encoding="utf-8") as logfile:
- suite_stop_time = timing.time()
- suite_time_delta = suite_stop_time - self.suite_start_time
-
- numtests = (
- self.stats["passed"]
- + self.stats["failure"]
- + self.stats["skipped"]
- + self.stats["error"]
- - self.cnt_double_fail_tests
- )
- logfile.write('<?xml version="1.0" encoding="utf-8"?>')
-
- suite_node = ET.Element(
- "testsuite",
- name=self.suite_name,
- errors=str(self.stats["error"]),
- failures=str(self.stats["failure"]),
- skipped=str(self.stats["skipped"]),
- tests=str(numtests),
- time="%.3f" % suite_time_delta,
- timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
- hostname=platform.node(),
- )
- global_properties = self._get_global_properties_node()
- if global_properties is not None:
- suite_node.append(global_properties)
- for node_reporter in self.node_reporters_ordered:
- suite_node.append(node_reporter.to_xml())
- testsuites = ET.Element("testsuites")
- testsuites.append(suite_node)
- logfile.write(ET.tostring(testsuites, encoding="unicode"))
-
- def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
- terminalreporter.write_sep("-", f"generated xml file: {self.logfile}")
-
- def add_global_property(self, name: str, value: object) -> None:
- __tracebackhide__ = True
- _check_record_param_type("name", name)
- self.global_properties.append((name, bin_xml_escape(value)))
-
- def _get_global_properties_node(self) -> Optional[ET.Element]:
- """Return a Junit node containing custom properties, if any."""
- if self.global_properties:
- properties = ET.Element("properties")
- for name, value in self.global_properties:
- properties.append(ET.Element("property", name=name, value=value))
- return properties
- return None
diff --git a/contrib/python/pytest/py3/_pytest/legacypath.py b/contrib/python/pytest/py3/_pytest/legacypath.py
deleted file mode 100644
index 37e8c24220..0000000000
--- a/contrib/python/pytest/py3/_pytest/legacypath.py
+++ /dev/null
@@ -1,467 +0,0 @@
-"""Add backward compatibility support for the legacy py path type."""
-import shlex
-import subprocess
-from pathlib import Path
-from typing import List
-from typing import Optional
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-from iniconfig import SectionWrapper
-
-from _pytest.cacheprovider import Cache
-from _pytest.compat import final
-from _pytest.compat import LEGACY_PATH
-from _pytest.compat import legacy_path
-from _pytest.config import Config
-from _pytest.config import hookimpl
-from _pytest.config import PytestPluginManager
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import FixtureRequest
-from _pytest.main import Session
-from _pytest.monkeypatch import MonkeyPatch
-from _pytest.nodes import Collector
-from _pytest.nodes import Item
-from _pytest.nodes import Node
-from _pytest.pytester import HookRecorder
-from _pytest.pytester import Pytester
-from _pytest.pytester import RunResult
-from _pytest.terminal import TerminalReporter
-from _pytest.tmpdir import TempPathFactory
-
-if TYPE_CHECKING:
- from typing_extensions import Final
-
- import pexpect
-
-
-@final
-class Testdir:
- """
-    Similar to :class:`Pytester`, but this class works with legacy ``legacy_path`` objects instead.
-
- All methods just forward to an internal :class:`Pytester` instance, converting results
- to `legacy_path` objects as necessary.
- """
-
- __test__ = False
-
- CLOSE_STDIN: "Final" = Pytester.CLOSE_STDIN
- TimeoutExpired: "Final" = Pytester.TimeoutExpired
-
- def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None:
- check_ispytest(_ispytest)
- self._pytester = pytester
-
- @property
- def tmpdir(self) -> LEGACY_PATH:
- """Temporary directory where tests are executed."""
- return legacy_path(self._pytester.path)
-
- @property
- def test_tmproot(self) -> LEGACY_PATH:
- return legacy_path(self._pytester._test_tmproot)
-
- @property
- def request(self):
- return self._pytester._request
-
- @property
- def plugins(self):
- return self._pytester.plugins
-
- @plugins.setter
- def plugins(self, plugins):
- self._pytester.plugins = plugins
-
- @property
- def monkeypatch(self) -> MonkeyPatch:
- return self._pytester._monkeypatch
-
- def make_hook_recorder(self, pluginmanager) -> HookRecorder:
- """See :meth:`Pytester.make_hook_recorder`."""
- return self._pytester.make_hook_recorder(pluginmanager)
-
- def chdir(self) -> None:
- """See :meth:`Pytester.chdir`."""
- return self._pytester.chdir()
-
- def finalize(self) -> None:
- """See :meth:`Pytester._finalize`."""
- return self._pytester._finalize()
-
- def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH:
- """See :meth:`Pytester.makefile`."""
- if ext and not ext.startswith("."):
-            # pytester.makefile raises a ValueError in a way that
-            # testdir.makefile did not, because pathlib.Path is stricter
-            # about suffixes than py.path. This ext argument is likely a
-            # user error, but since testdir allowed it, we prepend "." as a
-            # workaround to avoid breaking testdir usage that worked before.
- ext = "." + ext
- return legacy_path(self._pytester.makefile(ext, *args, **kwargs))
-
- def makeconftest(self, source) -> LEGACY_PATH:
- """See :meth:`Pytester.makeconftest`."""
- return legacy_path(self._pytester.makeconftest(source))
-
- def makeini(self, source) -> LEGACY_PATH:
- """See :meth:`Pytester.makeini`."""
- return legacy_path(self._pytester.makeini(source))
-
- def getinicfg(self, source: str) -> SectionWrapper:
- """See :meth:`Pytester.getinicfg`."""
- return self._pytester.getinicfg(source)
-
- def makepyprojecttoml(self, source) -> LEGACY_PATH:
- """See :meth:`Pytester.makepyprojecttoml`."""
- return legacy_path(self._pytester.makepyprojecttoml(source))
-
- def makepyfile(self, *args, **kwargs) -> LEGACY_PATH:
- """See :meth:`Pytester.makepyfile`."""
- return legacy_path(self._pytester.makepyfile(*args, **kwargs))
-
- def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH:
- """See :meth:`Pytester.maketxtfile`."""
- return legacy_path(self._pytester.maketxtfile(*args, **kwargs))
-
- def syspathinsert(self, path=None) -> None:
- """See :meth:`Pytester.syspathinsert`."""
- return self._pytester.syspathinsert(path)
-
- def mkdir(self, name) -> LEGACY_PATH:
- """See :meth:`Pytester.mkdir`."""
- return legacy_path(self._pytester.mkdir(name))
-
- def mkpydir(self, name) -> LEGACY_PATH:
- """See :meth:`Pytester.mkpydir`."""
- return legacy_path(self._pytester.mkpydir(name))
-
- def copy_example(self, name=None) -> LEGACY_PATH:
- """See :meth:`Pytester.copy_example`."""
- return legacy_path(self._pytester.copy_example(name))
-
- def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]:
- """See :meth:`Pytester.getnode`."""
- return self._pytester.getnode(config, arg)
-
- def getpathnode(self, path):
- """See :meth:`Pytester.getpathnode`."""
- return self._pytester.getpathnode(path)
-
- def genitems(self, colitems: List[Union[Item, Collector]]) -> List[Item]:
- """See :meth:`Pytester.genitems`."""
- return self._pytester.genitems(colitems)
-
- def runitem(self, source):
- """See :meth:`Pytester.runitem`."""
- return self._pytester.runitem(source)
-
- def inline_runsource(self, source, *cmdlineargs):
- """See :meth:`Pytester.inline_runsource`."""
- return self._pytester.inline_runsource(source, *cmdlineargs)
-
- def inline_genitems(self, *args):
- """See :meth:`Pytester.inline_genitems`."""
- return self._pytester.inline_genitems(*args)
-
- def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
- """See :meth:`Pytester.inline_run`."""
- return self._pytester.inline_run(
- *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
- )
-
- def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
- """See :meth:`Pytester.runpytest_inprocess`."""
- return self._pytester.runpytest_inprocess(*args, **kwargs)
-
- def runpytest(self, *args, **kwargs) -> RunResult:
- """See :meth:`Pytester.runpytest`."""
- return self._pytester.runpytest(*args, **kwargs)
-
- def parseconfig(self, *args) -> Config:
- """See :meth:`Pytester.parseconfig`."""
- return self._pytester.parseconfig(*args)
-
- def parseconfigure(self, *args) -> Config:
- """See :meth:`Pytester.parseconfigure`."""
- return self._pytester.parseconfigure(*args)
-
- def getitem(self, source, funcname="test_func"):
- """See :meth:`Pytester.getitem`."""
- return self._pytester.getitem(source, funcname)
-
- def getitems(self, source):
- """See :meth:`Pytester.getitems`."""
- return self._pytester.getitems(source)
-
- def getmodulecol(self, source, configargs=(), withinit=False):
- """See :meth:`Pytester.getmodulecol`."""
- return self._pytester.getmodulecol(
- source, configargs=configargs, withinit=withinit
- )
-
- def collect_by_name(
- self, modcol: Collector, name: str
- ) -> Optional[Union[Item, Collector]]:
- """See :meth:`Pytester.collect_by_name`."""
- return self._pytester.collect_by_name(modcol, name)
-
- def popen(
- self,
- cmdargs,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=CLOSE_STDIN,
- **kw,
- ):
- """See :meth:`Pytester.popen`."""
- return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
-
- def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
- """See :meth:`Pytester.run`."""
- return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
-
- def runpython(self, script) -> RunResult:
- """See :meth:`Pytester.runpython`."""
- return self._pytester.runpython(script)
-
- def runpython_c(self, command):
- """See :meth:`Pytester.runpython_c`."""
- return self._pytester.runpython_c(command)
-
- def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
- """See :meth:`Pytester.runpytest_subprocess`."""
- return self._pytester.runpytest_subprocess(*args, timeout=timeout)
-
- def spawn_pytest(
- self, string: str, expect_timeout: float = 10.0
- ) -> "pexpect.spawn":
- """See :meth:`Pytester.spawn_pytest`."""
- return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
-
- def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
- """See :meth:`Pytester.spawn`."""
- return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
-
- def __repr__(self) -> str:
- return f"<Testdir {self.tmpdir!r}>"
-
- def __str__(self) -> str:
- return str(self.tmpdir)
-
-
-class LegacyTestdirPlugin:
- @staticmethod
- @fixture
- def testdir(pytester: Pytester) -> Testdir:
- """
- Identical to :fixture:`pytester`, and provides an instance whose methods return
- legacy ``LEGACY_PATH`` objects instead when applicable.
-
- New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
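-
-        Example (illustrative)::
-
-            def test_something(testdir):
-                testdir.makepyfile("def test_ok(): pass")
-                result = testdir.runpytest()
-                result.assert_outcomes(passed=1)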
- """
- return Testdir(pytester, _ispytest=True)
-
-
-@final
-@attr.s(init=False, auto_attribs=True)
-class TempdirFactory:
- """Backward compatibility wrapper that implements :class:``_pytest.compat.LEGACY_PATH``
- for :class:``TempPathFactory``."""
-
- _tmppath_factory: TempPathFactory
-
- def __init__(
- self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
- ) -> None:
- check_ispytest(_ispytest)
- self._tmppath_factory = tmppath_factory
-
- def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
- """Same as :meth:`TempPathFactory.mktemp`, but returns a ``_pytest.compat.LEGACY_PATH`` object."""
- return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
-
- def getbasetemp(self) -> LEGACY_PATH:
- """Backward compat wrapper for ``_tmppath_factory.getbasetemp``."""
- return legacy_path(self._tmppath_factory.getbasetemp().resolve())
-
-
-class LegacyTmpdirPlugin:
- @staticmethod
- @fixture(scope="session")
- def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
- """Return a :class:`pytest.TempdirFactory` instance for the test session."""
- # Set dynamically by pytest_configure().
- return request.config._tmpdirhandler # type: ignore
-
- @staticmethod
- @fixture
- def tmpdir(tmp_path: Path) -> LEGACY_PATH:
- """Return a temporary directory path object which is unique to each test
- function invocation, created as a sub directory of the base temporary
- directory.
-
- By default, a new base temporary directory is created each test session,
- and old bases are removed after 3 sessions, to aid in debugging. If
- ``--basetemp`` is used then it is cleared each session. See :ref:`base
- temporary directory`.
-
- The returned object is a `legacy_path`_ object.
-
- .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
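-
-        Example (illustrative)::
-
-            def test_create_file(tmpdir):
-                p = tmpdir.join("hello.txt")
-                p.write("content")
-                assert p.read() == "content"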
- """
- return legacy_path(tmp_path)
-
-
-def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
- """Return a directory path object with the given name.
-
- Same as :func:`mkdir`, but returns a legacy py path instance.
- """
- return legacy_path(self.mkdir(name))
-
-
-def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
- """(deprecated) The file system path of the test module which collected this test."""
- return legacy_path(self.path)
-
-
-def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
- """The directory from which pytest was invoked.
-
- Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
-
- :type: LEGACY_PATH
- """
- return legacy_path(self.startpath)
-
-
-def Config_invocation_dir(self: Config) -> LEGACY_PATH:
- """The directory from which pytest was invoked.
-
- Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
- which is a :class:`pathlib.Path`.
-
- :type: LEGACY_PATH
- """
- return legacy_path(str(self.invocation_params.dir))
-
-
-def Config_rootdir(self: Config) -> LEGACY_PATH:
- """The path to the :ref:`rootdir <rootdir>`.
-
- Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
-
- :type: LEGACY_PATH
- """
- return legacy_path(str(self.rootpath))
-
-
-def Config_inifile(self: Config) -> Optional[LEGACY_PATH]:
- """The path to the :ref:`configfile <configfiles>`.
-
- Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
-
- :type: Optional[LEGACY_PATH]
- """
- return legacy_path(str(self.inipath)) if self.inipath else None
-
-
-def Session_startdir(self: Session) -> LEGACY_PATH:
- """The path from which pytest was invoked.
-
- Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
-
- :type: LEGACY_PATH
- """
- return legacy_path(self.startpath)
-
-
-def Config__getini_unknown_type(
- self, name: str, type: str, value: Union[str, List[str]]
-):
- if type == "pathlist":
- # TODO: This assert is probably not valid in all cases.
- assert self.inipath is not None
- dp = self.inipath.parent
- input_values = shlex.split(value) if isinstance(value, str) else value
- return [legacy_path(str(dp / x)) for x in input_values]
- else:
- raise ValueError(f"unknown configuration type: {type}", value)
-
-
-def Node_fspath(self: Node) -> LEGACY_PATH:
- """(deprecated) returns a legacy_path copy of self.path"""
- return legacy_path(self.path)
-
-
-def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None:
- self.path = Path(value)
-
-
-@hookimpl(tryfirst=True)
-def pytest_load_initial_conftests(early_config: Config) -> None:
- """Monkeypatch legacy path attributes in several classes, as early as possible."""
- mp = MonkeyPatch()
- early_config.add_cleanup(mp.undo)
-
- # Add Cache.makedir().
- mp.setattr(Cache, "makedir", Cache_makedir, raising=False)
-
- # Add FixtureRequest.fspath property.
- mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False)
-
- # Add TerminalReporter.startdir property.
- mp.setattr(
- TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False
- )
-
- # Add Config.{invocation_dir,rootdir,inifile} properties.
- mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False)
- mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False)
- mp.setattr(Config, "inifile", property(Config_inifile), raising=False)
-
- # Add Session.startdir property.
- mp.setattr(Session, "startdir", property(Session_stardir), raising=False)
-
- # Add pathlist configuration type.
- mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type)
-
- # Add Node.fspath property.
- mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False)
-
-
-@hookimpl
-def pytest_configure(config: Config) -> None:
- """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed."""
- if config.pluginmanager.has_plugin("tmpdir"):
- mp = MonkeyPatch()
- config.add_cleanup(mp.undo)
- # Create TmpdirFactory and attach it to the config object.
- #
- # This is to comply with existing plugins which expect the handler to be
- # available at pytest_configure time, but ideally should be moved entirely
- # to the tmpdir_factory session fixture.
- try:
- tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined]
- except AttributeError:
- # tmpdir plugin is blocked.
- pass
- else:
- _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True)
- mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False)
-
- config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir")
-
-
-@hookimpl
-def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None:
- # pytester is not loaded by default and is commonly loaded from a conftest,
- # so checking for it in `pytest_configure` is not enough.
- is_pytester = plugin is manager.get_plugin("pytester")
- if is_pytester and not manager.is_registered(LegacyTestdirPlugin):
- manager.register(LegacyTestdirPlugin, "legacypath-pytester")
diff --git a/contrib/python/pytest/py3/_pytest/logging.py b/contrib/python/pytest/py3/_pytest/logging.py
deleted file mode 100644
index 0163554bae..0000000000
--- a/contrib/python/pytest/py3/_pytest/logging.py
+++ /dev/null
@@ -1,826 +0,0 @@
-"""Access and control log capturing."""
-import io
-import logging
-import os
-import re
-from contextlib import contextmanager
-from contextlib import nullcontext
-from io import StringIO
-from pathlib import Path
-from typing import AbstractSet
-from typing import Dict
-from typing import Generator
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-from _pytest import nodes
-from _pytest._io import TerminalWriter
-from _pytest.capture import CaptureManager
-from _pytest.compat import final
-from _pytest.config import _strtobool
-from _pytest.config import Config
-from _pytest.config import create_terminal_writer
-from _pytest.config import hookimpl
-from _pytest.config import UsageError
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import FixtureRequest
-from _pytest.main import Session
-from _pytest.stash import StashKey
-from _pytest.terminal import TerminalReporter
-
-if TYPE_CHECKING:
- logging_StreamHandler = logging.StreamHandler[StringIO]
-else:
- logging_StreamHandler = logging.StreamHandler
-
-
-DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
-DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
-_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
-caplog_handler_key = StashKey["LogCaptureHandler"]()
-caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]()
-
-
-def _remove_ansi_escape_sequences(text: str) -> str:
- return _ANSI_ESCAPE_SEQ.sub("", text)
-
-
-class ColoredLevelFormatter(logging.Formatter):
- """A logging formatter which colorizes the %(levelname)..s part of the
- log format passed to __init__."""
-
- LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = {
- logging.CRITICAL: {"red"},
- logging.ERROR: {"red", "bold"},
- logging.WARNING: {"yellow"},
- logging.WARN: {"yellow"},
- logging.INFO: {"green"},
- logging.DEBUG: {"purple"},
- logging.NOTSET: set(),
- }
- LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)")
-
- def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None:
- super().__init__(*args, **kwargs)
- self._terminalwriter = terminalwriter
- self._original_fmt = self._style._fmt
- self._level_to_fmt_mapping: Dict[int, str] = {}
-
- for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
- self.add_color_level(level, *color_opts)
-
- def add_color_level(self, level: int, *color_opts: str) -> None:
- """Add or update color opts for a log level.
-
- :param level:
- Log level to apply a style to, e.g. ``logging.INFO``.
- :param color_opts:
-            ANSI escape sequence color options. A capitalized color indicates
-            a background color, e.g. ``'green', 'Yellow', 'bold'`` will give
-            bold green text on a yellow background.
-
- .. warning::
- This is an experimental API.
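-
-        Example (an illustrative sketch run from a conftest.py hook, assuming
-        live logging with color is enabled so the formatter is a
-        :class:`ColoredLevelFormatter`)::
-
-            def pytest_configure(config):
-                logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
-                logging_plugin.log_cli_handler.formatter.add_color_level(logging.INFO, "cyan")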
- """
-
- assert self._fmt is not None
- levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
- if not levelname_fmt_match:
- return
- levelname_fmt = levelname_fmt_match.group()
-
- formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)}
-
- # add ANSI escape sequences around the formatted levelname
- color_kwargs = {name: True for name in color_opts}
- colorized_formatted_levelname = self._terminalwriter.markup(
- formatted_levelname, **color_kwargs
- )
- self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
- colorized_formatted_levelname, self._fmt
- )
-
- def format(self, record: logging.LogRecord) -> str:
- fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
- self._style._fmt = fmt
- return super().format(record)
-
-
-class PercentStyleMultiline(logging.PercentStyle):
- """A logging style with special support for multiline messages.
-
- If the message of a record consists of multiple lines, this style
- formats the message as if each line were logged separately.
- """
-
- def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None:
- super().__init__(fmt)
- self._auto_indent = self._get_auto_indent(auto_indent)
-
- @staticmethod
- def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int:
- """Determine the current auto indentation setting.
-
-        Specify auto indent behavior (on/off/fixed) by passing
-        extra={"auto_indent": [value]} to the call to logging.log(), or by
-        using the --log-auto-indent [value] command line option or the
-        log_auto_indent [value] config option.
-
- Default behavior is auto-indent off.
-
- Using the string "True" or "on" or the boolean True as the value
- turns auto indent on, using the string "False" or "off" or the
- boolean False or the int 0 turns it off, and specifying a
- positive integer fixes the indentation position to the value
- specified.
-
- Any other values for the option are invalid, and will silently be
- converted to the default.
-
- :param None|bool|int|str auto_indent_option:
- User specified option for indentation from command line, config
- or extra kwarg. Accepts int, bool or str. str option accepts the
- same range of values as boolean config options, as well as
- positive integers represented in str form.
-
- :returns:
- Indentation value, which can be
- -1 (automatically determine indentation) or
- 0 (auto-indent turned off) or
- >0 (explicitly set indentation position).
- """
-
- if auto_indent_option is None:
- return 0
- elif isinstance(auto_indent_option, bool):
- if auto_indent_option:
- return -1
- else:
- return 0
- elif isinstance(auto_indent_option, int):
- return int(auto_indent_option)
- elif isinstance(auto_indent_option, str):
- try:
- return int(auto_indent_option)
- except ValueError:
- pass
- try:
- if _strtobool(auto_indent_option):
- return -1
- except ValueError:
- return 0
-
- return 0
-
- def format(self, record: logging.LogRecord) -> str:
- if "\n" in record.message:
- if hasattr(record, "auto_indent"):
- # Passed in from the "extra={}" kwarg on the call to logging.log().
- auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined]
- else:
- auto_indent = self._auto_indent
-
- if auto_indent:
- lines = record.message.splitlines()
- formatted = self._fmt % {**record.__dict__, "message": lines[0]}
-
- if auto_indent < 0:
- indentation = _remove_ansi_escape_sequences(formatted).find(
- lines[0]
- )
- else:
- # Optimizes logging by allowing a fixed indentation.
- indentation = auto_indent
- lines[0] = formatted
- return ("\n" + " " * indentation).join(lines)
- return self._fmt % record.__dict__
-
-
-def get_option_ini(config: Config, *names: str):
- for name in names:
- ret = config.getoption(name) # 'default' arg won't work as expected
- if ret is None:
- ret = config.getini(name)
- if ret:
- return ret
-
-
-def pytest_addoption(parser: Parser) -> None:
- """Add options to control log capturing."""
- group = parser.getgroup("logging")
-
- def add_option_ini(option, dest, default=None, type=None, **kwargs):
- parser.addini(
- dest, default=default, type=type, help="default value for " + option
- )
- group.addoption(option, dest=dest, **kwargs)
-
- add_option_ini(
- "--log-level",
- dest="log_level",
- default=None,
- metavar="LEVEL",
- help=(
- "level of messages to catch/display.\n"
- "Not set by default, so it depends on the root/parent log handler's"
- ' effective level, where it is "WARNING" by default.'
- ),
- )
- add_option_ini(
- "--log-format",
- dest="log_format",
- default=DEFAULT_LOG_FORMAT,
- help="log format as used by the logging module.",
- )
- add_option_ini(
- "--log-date-format",
- dest="log_date_format",
- default=DEFAULT_LOG_DATE_FORMAT,
- help="log date format as used by the logging module.",
- )
- parser.addini(
- "log_cli",
- default=False,
- type="bool",
- help='enable log display during test run (also known as "live logging").',
- )
- add_option_ini(
- "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
- )
- add_option_ini(
- "--log-cli-format",
- dest="log_cli_format",
- default=None,
- help="log format as used by the logging module.",
- )
- add_option_ini(
- "--log-cli-date-format",
- dest="log_cli_date_format",
- default=None,
- help="log date format as used by the logging module.",
- )
- add_option_ini(
- "--log-file",
- dest="log_file",
- default=None,
- help="path to a file when logging will be written to.",
- )
- add_option_ini(
- "--log-file-level",
- dest="log_file_level",
- default=None,
- help="log file logging level.",
- )
- add_option_ini(
- "--log-file-format",
- dest="log_file_format",
- default=DEFAULT_LOG_FORMAT,
- help="log format as used by the logging module.",
- )
- add_option_ini(
- "--log-file-date-format",
- dest="log_file_date_format",
- default=DEFAULT_LOG_DATE_FORMAT,
- help="log date format as used by the logging module.",
- )
- add_option_ini(
- "--log-auto-indent",
- dest="log_auto_indent",
- default=None,
- help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
- )
-
-
-_HandlerType = TypeVar("_HandlerType", bound=logging.Handler)
-
-
-# Not using @contextmanager for performance reasons.
-class catching_logs:
- """Context manager that prepares the whole logging machinery properly."""
-
- __slots__ = ("handler", "level", "orig_level")
-
- def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None:
- self.handler = handler
- self.level = level
-
- def __enter__(self):
- root_logger = logging.getLogger()
- if self.level is not None:
- self.handler.setLevel(self.level)
- root_logger.addHandler(self.handler)
- if self.level is not None:
- self.orig_level = root_logger.level
- root_logger.setLevel(min(self.orig_level, self.level))
- return self.handler
-
- def __exit__(self, type, value, traceback):
- root_logger = logging.getLogger()
- if self.level is not None:
- root_logger.setLevel(self.orig_level)
- root_logger.removeHandler(self.handler)
-
-
-class LogCaptureHandler(logging_StreamHandler):
- """A logging handler that stores log records and the log text."""
-
- def __init__(self) -> None:
- """Create a new log handler."""
- super().__init__(StringIO())
- self.records: List[logging.LogRecord] = []
-
- def emit(self, record: logging.LogRecord) -> None:
- """Keep the log records in a list in addition to the log text."""
- self.records.append(record)
- super().emit(record)
-
- def reset(self) -> None:
- self.records = []
- self.stream = StringIO()
-
- def handleError(self, record: logging.LogRecord) -> None:
- if logging.raiseExceptions:
- # Fail the test if the log message is bad (emit failed).
- # The default behavior of logging is to print "Logging error"
- # to stderr with the call stack and some extra details.
- # pytest wants to make such mistakes visible during testing.
- raise
-
-
-@final
-class LogCaptureFixture:
- """Provides access and control of log capturing."""
-
- def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
- check_ispytest(_ispytest)
- self._item = item
- self._initial_handler_level: Optional[int] = None
- # Dict of log name -> log level.
- self._initial_logger_levels: Dict[Optional[str], int] = {}
-
- def _finalize(self) -> None:
- """Finalize the fixture.
-
- This restores the log levels changed by :meth:`set_level`.
- """
- # Restore log levels.
- if self._initial_handler_level is not None:
- self.handler.setLevel(self._initial_handler_level)
- for logger_name, level in self._initial_logger_levels.items():
- logger = logging.getLogger(logger_name)
- logger.setLevel(level)
-
- @property
- def handler(self) -> LogCaptureHandler:
- """Get the logging handler used by the fixture.
-
- :rtype: LogCaptureHandler
- """
- return self._item.stash[caplog_handler_key]
-
- def get_records(self, when: str) -> List[logging.LogRecord]:
- """Get the logging records for one of the possible test phases.
-
- :param str when:
- Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
-
- :returns: The list of captured records at the given stage.
- :rtype: List[logging.LogRecord]
-
- .. versionadded:: 3.4
- """
- return self._item.stash[caplog_records_key].get(when, [])
-
- @property
- def text(self) -> str:
- """The formatted log text."""
- return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
-
- @property
- def records(self) -> List[logging.LogRecord]:
- """The list of log records."""
- return self.handler.records
-
- @property
- def record_tuples(self) -> List[Tuple[str, int, str]]:
- """A list of a stripped down version of log records intended
- for use in assertion comparison.
-
- The format of the tuple is:
-
- (logger_name, log_level, message)
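-
-        Example (illustrative)::
-
-            assert caplog.record_tuples == [("root", logging.INFO, "hello")]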
- """
- return [(r.name, r.levelno, r.getMessage()) for r in self.records]
-
- @property
- def messages(self) -> List[str]:
- """A list of format-interpolated log messages.
-
- Unlike 'records', which contains the format string and parameters for
- interpolation, log messages in this list are all interpolated.
-
- Unlike 'text', which contains the output from the handler, log
- messages in this list are unadorned with levels, timestamps, etc,
- making exact comparisons more reliable.
-
- Note that traceback or stack info (from :func:`logging.exception` or
- the `exc_info` or `stack_info` arguments to the logging functions) is
- not included, as this is added by the formatter in the handler.
-
- .. versionadded:: 3.7
- """
- return [r.getMessage() for r in self.records]
-
- def clear(self) -> None:
- """Reset the list of log records and the captured log text."""
- self.handler.reset()
-
- def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None:
- """Set the level of a logger for the duration of a test.
-
- .. versionchanged:: 3.4
- The levels of the loggers changed by this function will be
- restored to their initial values at the end of the test.
-
- :param int level: The level.
- :param str logger: The logger to update. If not given, the root logger.
- """
- logger_obj = logging.getLogger(logger)
- # Save the original log-level to restore it during teardown.
- self._initial_logger_levels.setdefault(logger, logger_obj.level)
- logger_obj.setLevel(level)
- if self._initial_handler_level is None:
- self._initial_handler_level = self.handler.level
- self.handler.setLevel(level)
-
- @contextmanager
- def at_level(
- self, level: Union[int, str], logger: Optional[str] = None
- ) -> Generator[None, None, None]:
- """Context manager that sets the level for capturing of logs. After
- the end of the 'with' statement the level is restored to its original
- value.
-
- :param int level: The level.
- :param str logger: The logger to update. If not given, the root logger.
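-
-        Example (illustrative; ``my.logger`` is a hypothetical logger name)::
-
-            def test_debug_capture(caplog):
-                with caplog.at_level(logging.DEBUG, logger="my.logger"):
-                    logging.getLogger("my.logger").debug("captured at DEBUG")
-                assert "captured at DEBUG" in caplog.text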
- """
- logger_obj = logging.getLogger(logger)
- orig_level = logger_obj.level
- logger_obj.setLevel(level)
- handler_orig_level = self.handler.level
- self.handler.setLevel(level)
- try:
- yield
- finally:
- logger_obj.setLevel(orig_level)
- self.handler.setLevel(handler_orig_level)
-
-
-@fixture
-def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]:
- """Access and control log capturing.
-
- Captured logs are available through the following properties/methods::
-
- * caplog.messages -> list of format-interpolated log messages
- * caplog.text -> string containing formatted log output
- * caplog.records -> list of logging.LogRecord instances
- * caplog.record_tuples -> list of (logger_name, level, message) tuples
- * caplog.clear() -> clear captured records and formatted log output string
- """
- result = LogCaptureFixture(request.node, _ispytest=True)
- yield result
- result._finalize()
-
-
-def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]:
- for setting_name in setting_names:
- log_level = config.getoption(setting_name)
- if log_level is None:
- log_level = config.getini(setting_name)
- if log_level:
- break
- else:
- return None
-
- if isinstance(log_level, str):
- log_level = log_level.upper()
- try:
- return int(getattr(logging, log_level, log_level))
- except ValueError as e:
- # Python logging does not recognise this as a logging level
- raise UsageError(
- "'{}' is not recognized as a logging level name for "
- "'{}'. Please consider passing the "
- "logging level num instead.".format(log_level, setting_name)
- ) from e
-
-
-# run after terminalreporter/capturemanager are configured
-@hookimpl(trylast=True)
-def pytest_configure(config: Config) -> None:
- config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
-
-
-class LoggingPlugin:
- """Attaches to the logging module and captures log messages for each test."""
-
- def __init__(self, config: Config) -> None:
- """Create a new plugin to capture log messages.
-
- The formatter can be safely shared across all handlers so
- create a single one for the entire test session here.
- """
- self._config = config
-
- # Report logging.
- self.formatter = self._create_formatter(
- get_option_ini(config, "log_format"),
- get_option_ini(config, "log_date_format"),
- get_option_ini(config, "log_auto_indent"),
- )
- self.log_level = get_log_level_for_setting(config, "log_level")
- self.caplog_handler = LogCaptureHandler()
- self.caplog_handler.setFormatter(self.formatter)
- self.report_handler = LogCaptureHandler()
- self.report_handler.setFormatter(self.formatter)
-
- # File logging.
- self.log_file_level = get_log_level_for_setting(config, "log_file_level")
- log_file = get_option_ini(config, "log_file") or os.devnull
- if log_file != os.devnull:
- directory = os.path.dirname(os.path.abspath(log_file))
- if not os.path.isdir(directory):
- os.makedirs(directory)
-
- self.log_file_handler = _FileHandler(log_file, mode="w", encoding="UTF-8")
- log_file_format = get_option_ini(config, "log_file_format", "log_format")
- log_file_date_format = get_option_ini(
- config, "log_file_date_format", "log_date_format"
- )
-
- log_file_formatter = logging.Formatter(
- log_file_format, datefmt=log_file_date_format
- )
- self.log_file_handler.setFormatter(log_file_formatter)
-
- # CLI/live logging.
- self.log_cli_level = get_log_level_for_setting(
- config, "log_cli_level", "log_level"
- )
- if self._log_cli_enabled():
- terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
- capture_manager = config.pluginmanager.get_plugin("capturemanager")
- # if capturemanager plugin is disabled, live logging still works.
- self.log_cli_handler: Union[
- _LiveLoggingStreamHandler, _LiveLoggingNullHandler
- ] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
- else:
- self.log_cli_handler = _LiveLoggingNullHandler()
- log_cli_formatter = self._create_formatter(
- get_option_ini(config, "log_cli_format", "log_format"),
- get_option_ini(config, "log_cli_date_format", "log_date_format"),
- get_option_ini(config, "log_auto_indent"),
- )
- self.log_cli_handler.setFormatter(log_cli_formatter)
-
- def _create_formatter(self, log_format, log_date_format, auto_indent):
- # Color option doesn't exist if terminal plugin is disabled.
- color = getattr(self._config.option, "color", "no")
- if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
- log_format
- ):
- formatter: logging.Formatter = ColoredLevelFormatter(
- create_terminal_writer(self._config), log_format, log_date_format
- )
- else:
- formatter = logging.Formatter(log_format, log_date_format)
-
- formatter._style = PercentStyleMultiline(
- formatter._style._fmt, auto_indent=auto_indent
- )
-
- return formatter
-
- def set_log_path(self, fname: str) -> None:
- """Set the filename parameter for Logging.FileHandler().
-
- Creates parent directory if it does not exist.
-
- .. warning::
- This is an experimental API.
- """
- fpath = Path(fname)
-
- if not fpath.is_absolute():
- fpath = self._config.rootpath / fpath
-
- if not fpath.parent.exists():
- fpath.parent.mkdir(exist_ok=True, parents=True)
-
- # https://github.com/python/mypy/issues/11193
- stream: io.TextIOWrapper = fpath.open(mode="w", encoding="UTF-8") # type: ignore[assignment]
- old_stream = self.log_file_handler.setStream(stream)
- if old_stream:
- old_stream.close()
-
- def _log_cli_enabled(self):
- """Return whether live logging is enabled."""
- enabled = self._config.getoption(
- "--log-cli-level"
- ) is not None or self._config.getini("log_cli")
- if not enabled:
- return False
-
- terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
- if terminal_reporter is None:
- # terminal reporter is disabled e.g. by pytest-xdist.
- return False
-
- return True
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_sessionstart(self) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("sessionstart")
-
- with catching_logs(self.log_cli_handler, level=self.log_cli_level):
- with catching_logs(self.log_file_handler, level=self.log_file_level):
- yield
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_collection(self) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("collection")
-
- with catching_logs(self.log_cli_handler, level=self.log_cli_level):
- with catching_logs(self.log_file_handler, level=self.log_file_level):
- yield
-
- @hookimpl(hookwrapper=True)
- def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]:
- if session.config.option.collectonly:
- yield
- return
-
- if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
- # The verbose flag is needed to avoid messy test progress output.
- self._config.option.verbose = 1
-
- with catching_logs(self.log_cli_handler, level=self.log_cli_level):
- with catching_logs(self.log_file_handler, level=self.log_file_level):
- yield # Run all the tests.
-
- @hookimpl
- def pytest_runtest_logstart(self) -> None:
- self.log_cli_handler.reset()
- self.log_cli_handler.set_when("start")
-
- @hookimpl
- def pytest_runtest_logreport(self) -> None:
- self.log_cli_handler.set_when("logreport")
-
- def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
- """Implement the internals of the pytest_runtest_xxx() hooks."""
- with catching_logs(
- self.caplog_handler,
- level=self.log_level,
- ) as caplog_handler, catching_logs(
- self.report_handler,
- level=self.log_level,
- ) as report_handler:
- caplog_handler.reset()
- report_handler.reset()
- item.stash[caplog_records_key][when] = caplog_handler.records
- item.stash[caplog_handler_key] = caplog_handler
-
- yield
-
- log = report_handler.stream.getvalue().strip()
- item.add_report_section(when, "log", log)
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("setup")
-
- empty: Dict[str, List[logging.LogRecord]] = {}
- item.stash[caplog_records_key] = empty
- yield from self._runtest_for(item, "setup")
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("call")
-
- yield from self._runtest_for(item, "call")
-
- @hookimpl(hookwrapper=True)
- def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("teardown")
-
- yield from self._runtest_for(item, "teardown")
- del item.stash[caplog_records_key]
- del item.stash[caplog_handler_key]
-
- @hookimpl
- def pytest_runtest_logfinish(self) -> None:
- self.log_cli_handler.set_when("finish")
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_sessionfinish(self) -> Generator[None, None, None]:
- self.log_cli_handler.set_when("sessionfinish")
-
- with catching_logs(self.log_cli_handler, level=self.log_cli_level):
- with catching_logs(self.log_file_handler, level=self.log_file_level):
- yield
-
- @hookimpl
- def pytest_unconfigure(self) -> None:
- # Close the FileHandler explicitly.
- # (logging.shutdown might have lost the weakref?!)
- self.log_file_handler.close()
-
-
-class _FileHandler(logging.FileHandler):
- """A logging FileHandler with pytest tweaks."""
-
- def handleError(self, record: logging.LogRecord) -> None:
- # Handled by LogCaptureHandler.
- pass
-
-
-class _LiveLoggingStreamHandler(logging_StreamHandler):
- """A logging StreamHandler used by the live logging feature: it will
- write a newline before the first log message in each test.
-
-    During live logging we must also explicitly disable stdout/stderr
-    capturing, otherwise it will be captured and will not appear in the
-    terminal.
- """
-
-    # Officially stream needs to be an IO[str], but TerminalReporter
- # isn't. So force it.
- stream: TerminalReporter = None # type: ignore
-
- def __init__(
- self,
- terminal_reporter: TerminalReporter,
- capture_manager: Optional[CaptureManager],
- ) -> None:
- super().__init__(stream=terminal_reporter) # type: ignore[arg-type]
- self.capture_manager = capture_manager
- self.reset()
- self.set_when(None)
- self._test_outcome_written = False
-
- def reset(self) -> None:
- """Reset the handler; should be called before the start of each test."""
- self._first_record_emitted = False
-
- def set_when(self, when: Optional[str]) -> None:
- """Prepare for the given test phase (setup/call/teardown)."""
- self._when = when
- self._section_name_shown = False
- if when == "start":
- self._test_outcome_written = False
-
- def emit(self, record: logging.LogRecord) -> None:
- ctx_manager = (
- self.capture_manager.global_and_fixture_disabled()
- if self.capture_manager
- else nullcontext()
- )
- with ctx_manager:
- if not self._first_record_emitted:
- self.stream.write("\n")
- self._first_record_emitted = True
- elif self._when in ("teardown", "finish"):
- if not self._test_outcome_written:
- self._test_outcome_written = True
- self.stream.write("\n")
- if not self._section_name_shown and self._when:
- self.stream.section("live log " + self._when, sep="-", bold=True)
- self._section_name_shown = True
- super().emit(record)
-
- def handleError(self, record: logging.LogRecord) -> None:
- # Handled by LogCaptureHandler.
- pass
-
-
-class _LiveLoggingNullHandler(logging.NullHandler):
- """A logging handler used when live logging is disabled."""
-
- def reset(self) -> None:
- pass
-
- def set_when(self, when: str) -> None:
- pass
-
- def handleError(self, record: logging.LogRecord) -> None:
- # Handled by LogCaptureHandler.
- pass
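
For reference, the caplog fixture defined above is typically exercised along these lines. This is a minimal sketch that relies only on the properties documented in its docstrings (set_level, record_tuples, messages); the logger name and log text are purely illustrative:

import logging

def test_disk_warning_is_logged(caplog):
    # Capture WARNING and above for the duration of this test only;
    # the original logger/handler levels are restored on teardown.
    caplog.set_level(logging.WARNING)
    logging.getLogger("myapp").warning("disk %s is low", "/dev/sda1")
    # record_tuples is a list of (logger_name, level, interpolated message).
    assert caplog.record_tuples == [
        ("myapp", logging.WARNING, "disk /dev/sda1 is low")
    ]
    # messages holds only the interpolated text, without level or timestamp.
    assert "disk /dev/sda1 is low" in caplog.messages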
diff --git a/contrib/python/pytest/py3/_pytest/main.py b/contrib/python/pytest/py3/_pytest/main.py
deleted file mode 100644
index 8f590754ae..0000000000
--- a/contrib/python/pytest/py3/_pytest/main.py
+++ /dev/null
@@ -1,895 +0,0 @@
-"""Core implementation of the testing process: init, session, runtest loop."""
-import argparse
-import fnmatch
-import functools
-import importlib
-import os
-import sys
-from pathlib import Path
-from typing import Callable
-from typing import Dict
-from typing import FrozenSet
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import overload
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-
-import _pytest._code
-from _pytest import nodes
-from _pytest.compat import final
-from _pytest.config import Config
-from _pytest.config import directory_arg
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config import PytestPluginManager
-from _pytest.config import UsageError
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureManager
-from _pytest.outcomes import exit
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import fnmatch_ex
-from _pytest.pathlib import visit
-from _pytest.reports import CollectReport
-from _pytest.reports import TestReport
-from _pytest.runner import collect_one_node
-from _pytest.runner import SetupState
-
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
-
-def pytest_addoption(parser: Parser) -> None:
- parser.addini(
- "norecursedirs",
- "directory patterns to avoid for recursion",
- type="args",
- default=[
- "*.egg",
- ".*",
- "_darcs",
- "build",
- "CVS",
- "dist",
- "node_modules",
- "venv",
- "{arch}",
- ],
- )
- parser.addini(
- "testpaths",
-        "directories to search for tests when no files or directories are given on the "
- "command line.",
- type="args",
- default=[],
- )
- group = parser.getgroup("general", "running and selection options")
- group._addoption(
- "-x",
- "--exitfirst",
- action="store_const",
- dest="maxfail",
- const=1,
- help="exit instantly on first error or failed test.",
- )
- group = parser.getgroup("pytest-warnings")
- group.addoption(
- "-W",
- "--pythonwarnings",
- action="append",
- help="set which warnings to report, see -W option of python itself.",
- )
- parser.addini(
- "filterwarnings",
- type="linelist",
- help="Each line specifies a pattern for "
- "warnings.filterwarnings. "
- "Processed after -W/--pythonwarnings.",
- )
- group._addoption(
- "--maxfail",
- metavar="num",
- action="store",
- type=int,
- dest="maxfail",
- default=0,
- help="exit after first num failures or errors.",
- )
- group._addoption(
- "--strict-config",
- action="store_true",
- help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.",
- )
- group._addoption(
- "--strict-markers",
- action="store_true",
- help="markers not registered in the `markers` section of the configuration file raise errors.",
- )
- group._addoption(
- "--strict",
- action="store_true",
- help="(deprecated) alias to --strict-markers.",
- )
- group._addoption(
- "-c",
- metavar="file",
- type=str,
- dest="inifilename",
- help="load configuration from `file` instead of trying to locate one of the implicit "
- "configuration files.",
- )
- group._addoption(
- "--continue-on-collection-errors",
- action="store_true",
- default=False,
- dest="continue_on_collection_errors",
- help="Force test execution even if collection errors occur.",
- )
- group._addoption(
- "--rootdir",
- action="store",
- dest="rootdir",
- help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
- "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
- "'$HOME/root_dir'.",
- )
-
- group = parser.getgroup("collect", "collection")
- group.addoption(
- "--collectonly",
- "--collect-only",
- "--co",
- action="store_true",
- help="only collect tests, don't execute them.",
- )
- group.addoption(
- "--pyargs",
- action="store_true",
- help="try to interpret all arguments as python packages.",
- )
- group.addoption(
- "--ignore",
- action="append",
- metavar="path",
- help="ignore path during collection (multi-allowed).",
- )
- group.addoption(
- "--ignore-glob",
- action="append",
- metavar="path",
- help="ignore path pattern during collection (multi-allowed).",
- )
- group.addoption(
- "--deselect",
- action="append",
- metavar="nodeid_prefix",
- help="deselect item (via node id prefix) during collection (multi-allowed).",
- )
- group.addoption(
- "--confcutdir",
- dest="confcutdir",
- default=None,
- metavar="dir",
- type=functools.partial(directory_arg, optname="--confcutdir"),
- help="only load conftest.py's relative to specified dir.",
- )
- group.addoption(
- "--noconftest",
- action="store_true",
- dest="noconftest",
- default=False,
- help="Don't load any conftest.py files.",
- )
- group.addoption(
- "--keepduplicates",
- "--keep-duplicates",
- action="store_true",
- dest="keepduplicates",
- default=False,
- help="Keep duplicate tests.",
- )
- group.addoption(
- "--collect-in-virtualenv",
- action="store_true",
- dest="collect_in_virtualenv",
- default=False,
- help="Don't ignore tests in a local virtualenv directory",
- )
- group.addoption(
- "--import-mode",
- default="prepend",
- choices=["prepend", "append", "importlib"],
- dest="importmode",
- help="prepend/append to sys.path when importing test modules and conftest files, "
- "default is to prepend.",
- )
-
- group = parser.getgroup("debugconfig", "test session debugging and configuration")
- group.addoption(
- "--basetemp",
- dest="basetemp",
- default=None,
- type=validate_basetemp,
- metavar="dir",
- help=(
-            "base temporary directory for this test run. "
- "(warning: this directory is removed if it exists)"
- ),
- )
-
-
-def validate_basetemp(path: str) -> str:
- # GH 7119
- msg = "basetemp must not be empty, the current working directory or any parent directory of it"
-
- # empty path
- if not path:
- raise argparse.ArgumentTypeError(msg)
-
- def is_ancestor(base: Path, query: Path) -> bool:
- """Return whether query is an ancestor of base."""
- if base == query:
- return True
- return query in base.parents
-
- # check if path is an ancestor of cwd
- if is_ancestor(Path.cwd(), Path(path).absolute()):
- raise argparse.ArgumentTypeError(msg)
-
- # check symlinks for ancestors
- if is_ancestor(Path.cwd().resolve(), Path(path).resolve()):
- raise argparse.ArgumentTypeError(msg)
-
- return path
-
-
-def wrap_session(
- config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]]
-) -> Union[int, ExitCode]:
- """Skeleton command line program."""
- session = Session.from_config(config)
- session.exitstatus = ExitCode.OK
- initstate = 0
- try:
- try:
- config._do_configure()
- initstate = 1
- config.hook.pytest_sessionstart(session=session)
- initstate = 2
- session.exitstatus = doit(config, session) or 0
- except UsageError:
- session.exitstatus = ExitCode.USAGE_ERROR
- raise
- except Failed:
- session.exitstatus = ExitCode.TESTS_FAILED
- except (KeyboardInterrupt, exit.Exception):
- excinfo = _pytest._code.ExceptionInfo.from_current()
- exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED
- if isinstance(excinfo.value, exit.Exception):
- if excinfo.value.returncode is not None:
- exitstatus = excinfo.value.returncode
- if initstate < 2:
- sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n")
- config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
- session.exitstatus = exitstatus
- except BaseException:
- session.exitstatus = ExitCode.INTERNAL_ERROR
- excinfo = _pytest._code.ExceptionInfo.from_current()
- try:
- config.notify_exception(excinfo, config.option)
- except exit.Exception as exc:
- if exc.returncode is not None:
- session.exitstatus = exc.returncode
- sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
- else:
- if isinstance(excinfo.value, SystemExit):
- sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
-
- finally:
- # Explicitly break reference cycle.
- excinfo = None # type: ignore
- os.chdir(session.startpath)
- if initstate >= 2:
- try:
- config.hook.pytest_sessionfinish(
- session=session, exitstatus=session.exitstatus
- )
- except exit.Exception as exc:
- if exc.returncode is not None:
- session.exitstatus = exc.returncode
- sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
- config._ensure_unconfigure()
- return session.exitstatus
-
-
-def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]:
- return wrap_session(config, _main)
-
-
-def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]:
- """Default command line protocol for initialization, session,
- running tests and reporting."""
- config.hook.pytest_collection(session=session)
- config.hook.pytest_runtestloop(session=session)
-
- if session.testsfailed:
- return ExitCode.TESTS_FAILED
- elif session.testscollected == 0:
- return ExitCode.NO_TESTS_COLLECTED
- return None
-
-
-def pytest_collection(session: "Session") -> None:
- session.perform_collect()
-
-
-def pytest_runtestloop(session: "Session") -> bool:
- if session.testsfailed and not session.config.option.continue_on_collection_errors:
- raise session.Interrupted(
- "%d error%s during collection"
- % (session.testsfailed, "s" if session.testsfailed != 1 else "")
- )
-
- if session.config.option.collectonly:
- return True
-
- for i, item in enumerate(session.items):
- nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
- item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
- if session.shouldfail:
- raise session.Failed(session.shouldfail)
- if session.shouldstop:
- raise session.Interrupted(session.shouldstop)
- return True
-
-
-def _in_venv(path: Path) -> bool:
- """Attempt to detect if ``path`` is the root of a Virtual Environment by
- checking for the existence of the appropriate activate script."""
- bindir = path.joinpath("Scripts" if sys.platform.startswith("win") else "bin")
- try:
- if not bindir.is_dir():
- return False
- except OSError:
- return False
- activates = (
- "activate",
- "activate.csh",
- "activate.fish",
- "Activate",
- "Activate.bat",
- "Activate.ps1",
- )
- return any(fname.name in activates for fname in bindir.iterdir())
-
-
-def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]:
- ignore_paths = config._getconftest_pathlist(
- "collect_ignore", path=collection_path.parent, rootpath=config.rootpath
- )
- ignore_paths = ignore_paths or []
- excludeopt = config.getoption("ignore")
- if excludeopt:
- ignore_paths.extend(absolutepath(x) for x in excludeopt)
-
- if collection_path in ignore_paths:
- return True
-
- ignore_globs = config._getconftest_pathlist(
- "collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath
- )
- ignore_globs = ignore_globs or []
- excludeglobopt = config.getoption("ignore_glob")
- if excludeglobopt:
- ignore_globs.extend(absolutepath(x) for x in excludeglobopt)
-
- if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs):
- return True
-
- allow_in_venv = config.getoption("collect_in_virtualenv")
- if not allow_in_venv and _in_venv(collection_path):
- return True
- return None
-
-
-def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None:
- deselect_prefixes = tuple(config.getoption("deselect") or [])
- if not deselect_prefixes:
- return
-
- remaining = []
- deselected = []
- for colitem in items:
- if colitem.nodeid.startswith(deselect_prefixes):
- deselected.append(colitem)
- else:
- remaining.append(colitem)
-
- if deselected:
- config.hook.pytest_deselected(items=deselected)
- items[:] = remaining
-
-
-class FSHookProxy:
- def __init__(self, pm: PytestPluginManager, remove_mods) -> None:
- self.pm = pm
- self.remove_mods = remove_mods
-
- def __getattr__(self, name: str):
- x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
- self.__dict__[name] = x
- return x
-
-
-class Interrupted(KeyboardInterrupt):
- """Signals that the test run was interrupted."""
-
- __module__ = "builtins" # For py3.
-
-
-class Failed(Exception):
- """Signals a stop as failed test run."""
-
-
-@attr.s(slots=True, auto_attribs=True)
-class _bestrelpath_cache(Dict[Path, str]):
- path: Path
-
- def __missing__(self, path: Path) -> str:
- r = bestrelpath(self.path, path)
- self[path] = r
- return r
-
-
-@final
-class Session(nodes.FSCollector):
- Interrupted = Interrupted
- Failed = Failed
- # Set on the session by runner.pytest_sessionstart.
- _setupstate: SetupState
- # Set on the session by fixtures.pytest_sessionstart.
- _fixturemanager: FixtureManager
- exitstatus: Union[int, ExitCode]
-
- def __init__(self, config: Config) -> None:
- super().__init__(
- path=config.rootpath,
- fspath=None,
- parent=None,
- config=config,
- session=self,
- nodeid="",
- )
- self.testsfailed = 0
- self.testscollected = 0
- self.shouldstop: Union[bool, str] = False
- self.shouldfail: Union[bool, str] = False
- self.trace = config.trace.root.get("collection")
- self._initialpaths: FrozenSet[Path] = frozenset()
-
- self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath)
-
- self.config.pluginmanager.register(self, name="session")
-
- @classmethod
- def from_config(cls, config: Config) -> "Session":
- session: Session = cls._create(config=config)
- return session
-
- def __repr__(self) -> str:
- return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
- self.__class__.__name__,
- self.name,
- getattr(self, "exitstatus", "<UNSET>"),
- self.testsfailed,
- self.testscollected,
- )
-
- @property
- def startpath(self) -> Path:
- """The path from which pytest was invoked.
-
- .. versionadded:: 7.0.0
- """
- return self.config.invocation_params.dir
-
- def _node_location_to_relpath(self, node_path: Path) -> str:
- # bestrelpath is a quite slow function.
- return self._bestrelpathcache[node_path]
-
- @hookimpl(tryfirst=True)
- def pytest_collectstart(self) -> None:
- if self.shouldfail:
- raise self.Failed(self.shouldfail)
- if self.shouldstop:
- raise self.Interrupted(self.shouldstop)
-
- @hookimpl(tryfirst=True)
- def pytest_runtest_logreport(
- self, report: Union[TestReport, CollectReport]
- ) -> None:
- if report.failed and not hasattr(report, "wasxfail"):
- self.testsfailed += 1
- maxfail = self.config.getvalue("maxfail")
- if maxfail and self.testsfailed >= maxfail:
- self.shouldfail = "stopping after %d failures" % (self.testsfailed)
-
- pytest_collectreport = pytest_runtest_logreport
-
- def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
- # Optimization: Path(Path(...)) is much slower than isinstance.
- path_ = path if isinstance(path, Path) else Path(path)
- return path_ in self._initialpaths
-
- def gethookproxy(self, fspath: "os.PathLike[str]"):
- # Optimization: Path(Path(...)) is much slower than isinstance.
- path = fspath if isinstance(fspath, Path) else Path(fspath)
- pm = self.config.pluginmanager
- # Check if we have the common case of running
- # hooks with all conftest.py files.
- my_conftestmodules = pm._getconftestmodules(
- path,
- self.config.getoption("importmode"),
- rootpath=self.config.rootpath,
- )
- remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
- if remove_mods:
- # One or more conftests are not in use at this fspath.
- from .config.compat import PathAwareHookProxy
-
- proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods))
- else:
- # All plugins are active for this fspath.
- proxy = self.config.hook
- return proxy
-
- def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
- if direntry.name == "__pycache__":
- return False
- fspath = Path(direntry.path)
- ihook = self.gethookproxy(fspath.parent)
- if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
- return False
- norecursepatterns = self.config.getini("norecursedirs")
- if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns):
- return False
- return True
-
- def _collectfile(
- self, fspath: Path, handle_dupes: bool = True
- ) -> Sequence[nodes.Collector]:
- assert (
- fspath.is_file()
- ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
- fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
- )
- ihook = self.gethookproxy(fspath)
- if not self.isinitpath(fspath):
- if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
- return ()
-
- if handle_dupes:
- keepduplicates = self.config.getoption("keepduplicates")
- if not keepduplicates:
- duplicate_paths = self.config.pluginmanager._duplicatepaths
- if fspath in duplicate_paths:
- return ()
- else:
- duplicate_paths.add(fspath)
-
- return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return]
-
- @overload
- def perform_collect(
- self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ...
- ) -> Sequence[nodes.Item]:
- ...
-
- @overload
- def perform_collect(
- self, args: Optional[Sequence[str]] = ..., genitems: bool = ...
- ) -> Sequence[Union[nodes.Item, nodes.Collector]]:
- ...
-
- def perform_collect(
- self, args: Optional[Sequence[str]] = None, genitems: bool = True
- ) -> Sequence[Union[nodes.Item, nodes.Collector]]:
- """Perform the collection phase for this session.
-
- This is called by the default :hook:`pytest_collection` hook
- implementation; see the documentation of this hook for more details.
- For testing purposes, it may also be called directly on a fresh
- ``Session``.
-
- This function normally recursively expands any collectors collected
- from the session to their items, and only items are returned. For
- testing purposes, this may be suppressed by passing ``genitems=False``,
- in which case the return value contains these collectors unexpanded,
- and ``session.items`` is empty.
- """
- if args is None:
- args = self.config.args
-
- self.trace("perform_collect", self, args)
- self.trace.root.indent += 1
-
- self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = []
- self._initial_parts: List[Tuple[Path, List[str]]] = []
- self.items: List[nodes.Item] = []
-
- hook = self.config.hook
-
- items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items
- try:
- initialpaths: List[Path] = []
- for arg in args:
- fspath, parts = resolve_collection_argument(
- self.config.invocation_params.dir,
- arg,
- as_pypath=self.config.option.pyargs,
- )
- self._initial_parts.append((fspath, parts))
- initialpaths.append(fspath)
- self._initialpaths = frozenset(initialpaths)
- rep = collect_one_node(self)
- self.ihook.pytest_collectreport(report=rep)
- self.trace.root.indent -= 1
- if self._notfound:
- errors = []
- for arg, cols in self._notfound:
- line = f"(no name {arg!r} in any of {cols!r})"
- errors.append(f"not found: {arg}\n{line}")
- raise UsageError(*errors)
- if not genitems:
- items = rep.result
- else:
- if rep.passed:
- for node in rep.result:
- self.items.extend(self.genitems(node))
-
- self.config.pluginmanager.check_pending()
- hook.pytest_collection_modifyitems(
- session=self, config=self.config, items=items
- )
- finally:
- hook.pytest_collection_finish(session=self)
-
- self.testscollected = len(items)
- return items
-
- def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]:
- from _pytest.python import Package
-
- # Keep track of any collected nodes in here, so we don't duplicate fixtures.
- node_cache1: Dict[Path, Sequence[nodes.Collector]] = {}
- node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {}
-
- # Keep track of any collected collectors in matchnodes paths, so they
- # are not collected more than once.
- matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {}
-
- # Dirnames of pkgs with dunder-init files.
- pkg_roots: Dict[str, Package] = {}
-
- for argpath, names in self._initial_parts:
- self.trace("processing argument", (argpath, names))
- self.trace.root.indent += 1
-
- # Start with a Session root, and delve to argpath item (dir or file)
- # and stack all Packages found on the way.
- # No point in finding packages when collecting doctests.
- if not self.config.getoption("doctestmodules", False):
- pm = self.config.pluginmanager
- for parent in (argpath, *argpath.parents):
- if not pm._is_in_confcutdir(argpath):
- break
-
- if parent.is_dir():
- pkginit = parent / "__init__.py"
- if pkginit.is_file() and pkginit not in node_cache1:
- col = self._collectfile(pkginit, handle_dupes=False)
- if col:
- if isinstance(col[0], Package):
- pkg_roots[str(parent)] = col[0]
- node_cache1[col[0].path] = [col[0]]
-
- # If it's a directory argument, recurse and look for any Subpackages.
- # Let the Package collector deal with subnodes, don't collect here.
- if argpath.is_dir():
- assert not names, f"invalid arg {(argpath, names)!r}"
-
- seen_dirs: Set[Path] = set()
- for direntry in visit(str(argpath), self._recurse):
- if not direntry.is_file():
- continue
-
- path = Path(direntry.path)
- dirpath = path.parent
-
- if dirpath not in seen_dirs:
- # Collect packages first.
- seen_dirs.add(dirpath)
- pkginit = dirpath / "__init__.py"
- if pkginit.exists():
- for x in self._collectfile(pkginit):
- yield x
- if isinstance(x, Package):
- pkg_roots[str(dirpath)] = x
- if str(dirpath) in pkg_roots:
- # Do not collect packages here.
- continue
-
- for x in self._collectfile(path):
- key2 = (type(x), x.path)
- if key2 in node_cache2:
- yield node_cache2[key2]
- else:
- node_cache2[key2] = x
- yield x
- else:
- assert argpath.is_file()
-
- if argpath in node_cache1:
- col = node_cache1[argpath]
- else:
- collect_root = pkg_roots.get(str(argpath.parent), self)
- col = collect_root._collectfile(argpath, handle_dupes=False)
- if col:
- node_cache1[argpath] = col
-
- matching = []
- work: List[
- Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]]
- ] = [(col, names)]
- while work:
- self.trace("matchnodes", col, names)
- self.trace.root.indent += 1
-
- matchnodes, matchnames = work.pop()
- for node in matchnodes:
- if not matchnames:
- matching.append(node)
- continue
- if not isinstance(node, nodes.Collector):
- continue
- key = (type(node), node.nodeid)
- if key in matchnodes_cache:
- rep = matchnodes_cache[key]
- else:
- rep = collect_one_node(node)
- matchnodes_cache[key] = rep
- if rep.passed:
- submatchnodes = []
- for r in rep.result:
- # TODO: Remove parametrized workaround once collection structure contains
- # parametrization.
- if (
- r.name == matchnames[0]
- or r.name.split("[")[0] == matchnames[0]
- ):
- submatchnodes.append(r)
- if submatchnodes:
- work.append((submatchnodes, matchnames[1:]))
- else:
- # Report collection failures here to avoid failing to run some test
- # specified in the command line because the module could not be
- # imported (#134).
- node.ihook.pytest_collectreport(report=rep)
-
- self.trace("matchnodes finished -> ", len(matching), "nodes")
- self.trace.root.indent -= 1
-
- if not matching:
- report_arg = "::".join((str(argpath), *names))
- self._notfound.append((report_arg, col))
- continue
-
- # If __init__.py was the only file requested, then the matched
- # node will be the corresponding Package (by default), and the
- # first yielded item will be the __init__ Module itself, so
- # just use that. If this special case isn't taken, then all the
- # files in the package will be yielded.
- if argpath.name == "__init__.py" and isinstance(matching[0], Package):
- try:
- yield next(iter(matching[0].collect()))
- except StopIteration:
- # The package collects nothing with only an __init__.py
- # file in it, which gets ignored by the default
- # "python_files" option.
- pass
- continue
-
- yield from matching
-
- self.trace.root.indent -= 1
-
- def genitems(
- self, node: Union[nodes.Item, nodes.Collector]
- ) -> Iterator[nodes.Item]:
- self.trace("genitems", node)
- if isinstance(node, nodes.Item):
- node.ihook.pytest_itemcollected(item=node)
- yield node
- else:
- assert isinstance(node, nodes.Collector)
- rep = collect_one_node(node)
- if rep.passed:
- for subnode in rep.result:
- yield from self.genitems(subnode)
- node.ihook.pytest_collectreport(report=rep)
-
-
-def search_pypath(module_name: str) -> str:
-    """Search sys.path for the given dotted module name, and return its file system path."""
- try:
- spec = importlib.util.find_spec(module_name)
- # AttributeError: looks like package module, but actually filename
- # ImportError: module does not exist
- # ValueError: not a module name
- except (AttributeError, ImportError, ValueError):
- return module_name
- if spec is None or spec.origin is None or spec.origin == "namespace":
- return module_name
- elif spec.submodule_search_locations:
- return os.path.dirname(spec.origin)
- else:
- return spec.origin
-
-
-def resolve_collection_argument(
- invocation_path: Path, arg: str, *, as_pypath: bool = False
-) -> Tuple[Path, List[str]]:
- """Parse path arguments optionally containing selection parts and return (fspath, names).
-
- Command-line arguments can point to files and/or directories, and optionally contain
- parts for specific tests selection, for example:
-
- "pkg/tests/test_foo.py::TestClass::test_foo"
-
- This function ensures the path exists, and returns a tuple:
-
- (Path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"])
-
- When as_pypath is True, expects that the command-line argument actually contains
- module paths instead of file-system paths:
-
- "pkg.tests.test_foo::TestClass::test_foo"
-
- In which case we search sys.path for a matching module, and then return the *path* to the
- found module.
-
- If the path doesn't exist, raise UsageError.
- If the path is a directory and selection parts are present, raise UsageError.
- """
- base, squacket, rest = str(arg).partition("[")
- strpath, *parts = base.split("::")
- if parts:
- parts[-1] = f"{parts[-1]}{squacket}{rest}"
- if as_pypath:
- strpath = search_pypath(strpath)
- fspath = invocation_path / strpath
- fspath = absolutepath(fspath)
- if not fspath.exists():
- msg = (
- "module or package not found: {arg} (missing __init__.py?)"
- if as_pypath
- else "file or directory not found: {arg}"
- )
- raise UsageError(msg.format(arg=arg))
- if parts and fspath.is_dir():
- msg = (
- "package argument cannot contain :: selection parts: {arg}"
- if as_pypath
- else "directory argument cannot contain :: selection parts: {arg}"
- )
- raise UsageError(msg.format(arg=arg))
- return fspath, parts
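
The behaviour documented in resolve_collection_argument() above can be illustrated with a small sketch. This is internal API, so subject to change; the pkg/tests layout is created on the fly only so the existence check succeeds:

from pathlib import Path
from _pytest.main import resolve_collection_argument

# Create the file referenced by the argument so the existence check passes.
Path("pkg/tests").mkdir(parents=True, exist_ok=True)
Path("pkg/tests/test_foo.py").touch()

fspath, parts = resolve_collection_argument(
    Path.cwd(), "pkg/tests/test_foo.py::TestClass::test_foo"
)
assert fspath == Path.cwd() / "pkg" / "tests" / "test_foo.py"
assert parts == ["TestClass", "test_foo"]
# A missing path, or a directory argument combined with ::parts,
# raises UsageError instead of returning.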
diff --git a/contrib/python/pytest/py3/_pytest/mark/__init__.py b/contrib/python/pytest/py3/_pytest/mark/__init__.py
deleted file mode 100644
index 11e6e34d73..0000000000
--- a/contrib/python/pytest/py3/_pytest/mark/__init__.py
+++ /dev/null
@@ -1,266 +0,0 @@
-"""Generic mechanism for marking and selecting python functions."""
-from typing import AbstractSet
-from typing import Collection
-from typing import List
-from typing import Optional
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-
-from .expression import Expression
-from .expression import ParseError
-from .structures import EMPTY_PARAMETERSET_OPTION
-from .structures import get_empty_parameterset_mark
-from .structures import Mark
-from .structures import MARK_GEN
-from .structures import MarkDecorator
-from .structures import MarkGenerator
-from .structures import ParameterSet
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config import UsageError
-from _pytest.config.argparsing import Parser
-from _pytest.stash import StashKey
-
-if TYPE_CHECKING:
- from _pytest.nodes import Item
-
-
-__all__ = [
- "MARK_GEN",
- "Mark",
- "MarkDecorator",
- "MarkGenerator",
- "ParameterSet",
- "get_empty_parameterset_mark",
-]
-
-
-old_mark_config_key = StashKey[Optional[Config]]()
-
-
-def param(
- *values: object,
- marks: Union[MarkDecorator, Collection[Union[MarkDecorator, Mark]]] = (),
- id: Optional[str] = None,
-) -> ParameterSet:
- """Specify a parameter in `pytest.mark.parametrize`_ calls or
- :ref:`parametrized fixtures <fixture-parametrize-marks>`.
-
- .. code-block:: python
-
- @pytest.mark.parametrize(
- "test_input,expected",
- [
- ("3+5", 8),
- pytest.param("6*9", 42, marks=pytest.mark.xfail),
- ],
- )
- def test_eval(test_input, expected):
- assert eval(test_input) == expected
-
- :param values: Variable args of the values of the parameter set, in order.
- :keyword marks: A single mark or a list of marks to be applied to this parameter set.
- :keyword str id: The id to attribute to this parameter set.
- """
- return ParameterSet.param(*values, marks=marks, id=id)
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group._addoption(
- "-k",
- action="store",
- dest="keyword",
- default="",
- metavar="EXPRESSION",
- help="only run tests which match the given substring expression. "
- "An expression is a python evaluatable expression "
- "where all names are substring-matched against test names "
- "and their parent classes. Example: -k 'test_method or test_"
- "other' matches all test functions and classes whose name "
- "contains 'test_method' or 'test_other', while -k 'not test_method' "
- "matches those that don't contain 'test_method' in their names. "
- "-k 'not test_method and not test_other' will eliminate the matches. "
- "Additionally keywords are matched to classes and functions "
- "containing extra names in their 'extra_keyword_matches' set, "
- "as well as functions which have names assigned directly to them. "
- "The matching is case-insensitive.",
- )
-
- group._addoption(
- "-m",
- action="store",
- dest="markexpr",
- default="",
- metavar="MARKEXPR",
- help="only run tests matching given mark expression.\n"
- "For example: -m 'mark1 and not mark2'.",
- )
-
- group.addoption(
- "--markers",
- action="store_true",
- help="show markers (builtin, plugin and per-project ones).",
- )
-
- parser.addini("markers", "markers for test functions", "linelist")
- parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
-
-
-@hookimpl(tryfirst=True)
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- import _pytest.config
-
- if config.option.markers:
- config._do_configure()
- tw = _pytest.config.create_terminal_writer(config)
- for line in config.getini("markers"):
- parts = line.split(":", 1)
- name = parts[0]
- rest = parts[1] if len(parts) == 2 else ""
- tw.write("@pytest.mark.%s:" % name, bold=True)
- tw.line(rest)
- tw.line()
- config._ensure_unconfigure()
- return 0
-
- return None
-
-
-@attr.s(slots=True, auto_attribs=True)
-class KeywordMatcher:
- """A matcher for keywords.
-
- Given a list of names, matches any substring of one of these names. The
- string inclusion check is case-insensitive.
-
- Will match on the name of colitem, including the names of its parents.
- Only matches names of items which are either a :class:`Class` or a
- :class:`Function`.
-
- Additionally, matches on names in the 'extra_keyword_matches' set of
- any item, as well as names directly assigned to test functions.
- """
-
- _names: AbstractSet[str]
-
- @classmethod
- def from_item(cls, item: "Item") -> "KeywordMatcher":
- mapped_names = set()
-
- # Add the names of the current item and any parent items.
- import pytest
-
- for node in item.listchain():
- if not isinstance(node, pytest.Session):
- mapped_names.add(node.name)
-
- # Add the names added as extra keywords to current or parent items.
- mapped_names.update(item.listextrakeywords())
-
- # Add the names attached to the current function through direct assignment.
- function_obj = getattr(item, "function", None)
- if function_obj:
- mapped_names.update(function_obj.__dict__)
-
- # Add the markers to the keywords as we no longer handle them correctly.
- mapped_names.update(mark.name for mark in item.iter_markers())
-
- return cls(mapped_names)
-
- def __call__(self, subname: str) -> bool:
- subname = subname.lower()
- names = (name.lower() for name in self._names)
-
- for name in names:
- if subname in name:
- return True
- return False
-
-
-def deselect_by_keyword(items: "List[Item]", config: Config) -> None:
- keywordexpr = config.option.keyword.lstrip()
- if not keywordexpr:
- return
-
- expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'")
-
- remaining = []
- deselected = []
- for colitem in items:
- if not expr.evaluate(KeywordMatcher.from_item(colitem)):
- deselected.append(colitem)
- else:
- remaining.append(colitem)
-
- if deselected:
- config.hook.pytest_deselected(items=deselected)
- items[:] = remaining
-
-
-@attr.s(slots=True, auto_attribs=True)
-class MarkMatcher:
- """A matcher for markers which are present.
-
- Tries to match on any marker names, attached to the given colitem.
- """
-
- own_mark_names: AbstractSet[str]
-
- @classmethod
- def from_item(cls, item: "Item") -> "MarkMatcher":
- mark_names = {mark.name for mark in item.iter_markers()}
- return cls(mark_names)
-
- def __call__(self, name: str) -> bool:
- return name in self.own_mark_names
-
-
-def deselect_by_mark(items: "List[Item]", config: Config) -> None:
- matchexpr = config.option.markexpr
- if not matchexpr:
- return
-
- expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'")
- remaining: List[Item] = []
- deselected: List[Item] = []
- for item in items:
- if expr.evaluate(MarkMatcher.from_item(item)):
- remaining.append(item)
- else:
- deselected.append(item)
- if deselected:
- config.hook.pytest_deselected(items=deselected)
- items[:] = remaining
-
-
-def _parse_expression(expr: str, exc_message: str) -> Expression:
- try:
- return Expression.compile(expr)
- except ParseError as e:
- raise UsageError(f"{exc_message}: {expr}: {e}") from None
-
-
-def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None:
- deselect_by_keyword(items, config)
- deselect_by_mark(items, config)
-
-
-def pytest_configure(config: Config) -> None:
- config.stash[old_mark_config_key] = MARK_GEN._config
- MARK_GEN._config = config
-
- empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
-
- if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
- raise UsageError(
- "{!s} must be one of skip, xfail or fail_at_collect"
- " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
- )
-
-
-def pytest_unconfigure(config: Config) -> None:
- MARK_GEN._config = config.stash.get(old_mark_config_key, None)
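
As a usage reminder, the -m/-k machinery above acts on marks applied in test code. A minimal, illustrative test module and the corresponding command lines (mark names are made up) might look like this:

import pytest

@pytest.mark.slow          # custom mark; register it under the `markers` ini option
def test_big_dataset():    # so that --strict-markers does not reject it
    assert sum(range(10_000)) == 49_995_000

@pytest.mark.skipif(True, reason="illustration only")
def test_not_run():
    ...

# Selection on the command line, handled by deselect_by_mark / deselect_by_keyword above:
#   pytest -m "slow and not serial"
#   pytest -k "big or not_run"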
diff --git a/contrib/python/pytest/py3/_pytest/mark/expression.py b/contrib/python/pytest/py3/_pytest/mark/expression.py
deleted file mode 100644
index 92220d7723..0000000000
--- a/contrib/python/pytest/py3/_pytest/mark/expression.py
+++ /dev/null
@@ -1,225 +0,0 @@
-r"""Evaluate match expressions, as used by `-k` and `-m`.
-
-The grammar is:
-
-expression: expr? EOF
-expr: and_expr ('or' and_expr)*
-and_expr: not_expr ('and' not_expr)*
-not_expr: 'not' not_expr | '(' expr ')' | ident
-ident: (\w|:|\+|-|\.|\[|\]|\\|/)+
-
-The semantics are:
-
-- Empty expression evaluates to False.
-- ident evaluates to True or False according to a provided matcher function.
-- or/and/not evaluate according to the usual boolean semantics.
-"""
-import ast
-import enum
-import re
-import types
-from typing import Callable
-from typing import Iterator
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-from typing import TYPE_CHECKING
-
-import attr
-
-if TYPE_CHECKING:
- from typing import NoReturn
-
-
-__all__ = [
- "Expression",
- "ParseError",
-]
-
-
-class TokenType(enum.Enum):
- LPAREN = "left parenthesis"
- RPAREN = "right parenthesis"
- OR = "or"
- AND = "and"
- NOT = "not"
- IDENT = "identifier"
- EOF = "end of input"
-
-
-@attr.s(frozen=True, slots=True, auto_attribs=True)
-class Token:
- type: TokenType
- value: str
- pos: int
-
-
-class ParseError(Exception):
- """The expression contains invalid syntax.
-
- :param column: The column in the line where the error occurred (1-based).
- :param message: A description of the error.
- """
-
- def __init__(self, column: int, message: str) -> None:
- self.column = column
- self.message = message
-
- def __str__(self) -> str:
- return f"at column {self.column}: {self.message}"
-
-
-class Scanner:
- __slots__ = ("tokens", "current")
-
- def __init__(self, input: str) -> None:
- self.tokens = self.lex(input)
- self.current = next(self.tokens)
-
- def lex(self, input: str) -> Iterator[Token]:
- pos = 0
- while pos < len(input):
- if input[pos] in (" ", "\t"):
- pos += 1
- elif input[pos] == "(":
- yield Token(TokenType.LPAREN, "(", pos)
- pos += 1
- elif input[pos] == ")":
- yield Token(TokenType.RPAREN, ")", pos)
- pos += 1
- else:
- match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:])
- if match:
- value = match.group(0)
- if value == "or":
- yield Token(TokenType.OR, value, pos)
- elif value == "and":
- yield Token(TokenType.AND, value, pos)
- elif value == "not":
- yield Token(TokenType.NOT, value, pos)
- else:
- yield Token(TokenType.IDENT, value, pos)
- pos += len(value)
- else:
- raise ParseError(
- pos + 1,
- f'unexpected character "{input[pos]}"',
- )
- yield Token(TokenType.EOF, "", pos)
-
- def accept(self, type: TokenType, *, reject: bool = False) -> Optional[Token]:
- if self.current.type is type:
- token = self.current
- if token.type is not TokenType.EOF:
- self.current = next(self.tokens)
- return token
- if reject:
- self.reject((type,))
- return None
-
- def reject(self, expected: Sequence[TokenType]) -> "NoReturn":
- raise ParseError(
- self.current.pos + 1,
- "expected {}; got {}".format(
- " OR ".join(type.value for type in expected),
- self.current.type.value,
- ),
- )
-
-
-# True, False and None are legal match expression identifiers,
-# but illegal as Python identifiers. To fix this, this prefix
-# is added to identifiers in the conversion to Python AST.
-IDENT_PREFIX = "$"
-
-
-def expression(s: Scanner) -> ast.Expression:
- if s.accept(TokenType.EOF):
- ret: ast.expr = ast.NameConstant(False)
- else:
- ret = expr(s)
- s.accept(TokenType.EOF, reject=True)
- return ast.fix_missing_locations(ast.Expression(ret))
-
-
-def expr(s: Scanner) -> ast.expr:
- ret = and_expr(s)
- while s.accept(TokenType.OR):
- rhs = and_expr(s)
- ret = ast.BoolOp(ast.Or(), [ret, rhs])
- return ret
-
-
-def and_expr(s: Scanner) -> ast.expr:
- ret = not_expr(s)
- while s.accept(TokenType.AND):
- rhs = not_expr(s)
- ret = ast.BoolOp(ast.And(), [ret, rhs])
- return ret
-
-
-def not_expr(s: Scanner) -> ast.expr:
- if s.accept(TokenType.NOT):
- return ast.UnaryOp(ast.Not(), not_expr(s))
- if s.accept(TokenType.LPAREN):
- ret = expr(s)
- s.accept(TokenType.RPAREN, reject=True)
- return ret
- ident = s.accept(TokenType.IDENT)
- if ident:
- return ast.Name(IDENT_PREFIX + ident.value, ast.Load())
- s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT))
-
-
-class MatcherAdapter(Mapping[str, bool]):
- """Adapts a matcher function to a locals mapping as required by eval()."""
-
- def __init__(self, matcher: Callable[[str], bool]) -> None:
- self.matcher = matcher
-
- def __getitem__(self, key: str) -> bool:
- return self.matcher(key[len(IDENT_PREFIX) :])
-
- def __iter__(self) -> Iterator[str]:
- raise NotImplementedError()
-
- def __len__(self) -> int:
- raise NotImplementedError()
-
-
-class Expression:
- """A compiled match expression as used by -k and -m.
-
- The expression can be evaluated against different matchers.
- """
-
- __slots__ = ("code",)
-
- def __init__(self, code: types.CodeType) -> None:
- self.code = code
-
- @classmethod
- def compile(self, input: str) -> "Expression":
- """Compile a match expression.
-
- :param input: The input expression - one line.
- """
- astexpr = expression(Scanner(input))
- code: types.CodeType = compile(
- astexpr,
- filename="<pytest match expression>",
- mode="eval",
- )
- return Expression(code)
-
- def evaluate(self, matcher: Callable[[str], bool]) -> bool:
- """Evaluate the match expression.
-
- :param matcher:
- Given an identifier, should return whether it matches or not.
- Should be prepared to handle arbitrary strings as input.
-
- :returns: Whether the expression matches or not.
- """
- ret: bool = eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher))
- return ret
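
A small sketch of how the Expression class above is driven; the identifier set is arbitrary, and the matcher is just a membership test, in the spirit of MarkMatcher:

from _pytest.mark.expression import Expression, ParseError

expr = Expression.compile("linux and not (slow or flaky)")
names = {"linux", "fast"}

# evaluate() invokes the matcher once per identifier reached during evaluation.
assert expr.evaluate(lambda ident: ident in names) is True
assert expr.evaluate(lambda ident: ident == "slow") is False

try:
    Expression.compile("slow and")  # dangling operator
except ParseError as err:
    # e.g. "at column 9: expected not OR left parenthesis OR identifier; got end of input"
    print(err)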
diff --git a/contrib/python/pytest/py3/_pytest/mark/structures.py b/contrib/python/pytest/py3/_pytest/mark/structures.py
deleted file mode 100644
index 93d6778c4e..0000000000
--- a/contrib/python/pytest/py3/_pytest/mark/structures.py
+++ /dev/null
@@ -1,593 +0,0 @@
-import collections.abc
-import inspect
-import warnings
-from typing import Any
-from typing import Callable
-from typing import Collection
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Mapping
-from typing import MutableMapping
-from typing import NamedTuple
-from typing import Optional
-from typing import overload
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import attr
-
-from .._code import getfslineno
-from ..compat import ascii_escaped
-from ..compat import final
-from ..compat import NOTSET
-from ..compat import NotSetType
-from _pytest.config import Config
-from _pytest.deprecated import check_ispytest
-from _pytest.outcomes import fail
-from _pytest.warning_types import PytestUnknownMarkWarning
-
-if TYPE_CHECKING:
- from ..nodes import Node
-
-
-EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
-
-
-def istestfunc(func) -> bool:
- return callable(func) and getattr(func, "__name__", "<lambda>") != "<lambda>"
-
-
-def get_empty_parameterset_mark(
- config: Config, argnames: Sequence[str], func
-) -> "MarkDecorator":
- from ..nodes import Collector
-
- fs, lineno = getfslineno(func)
- reason = "got empty parameter set %r, function %s at %s:%d" % (
- argnames,
- func.__name__,
- fs,
- lineno,
- )
-
- requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
- if requested_mark in ("", None, "skip"):
- mark = MARK_GEN.skip(reason=reason)
- elif requested_mark == "xfail":
- mark = MARK_GEN.xfail(reason=reason, run=False)
- elif requested_mark == "fail_at_collect":
- f_name = func.__name__
- _, lineno = getfslineno(func)
- raise Collector.CollectError(
- "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1)
- )
- else:
- raise LookupError(requested_mark)
- return mark
-
-
-class ParameterSet(NamedTuple):
- values: Sequence[Union[object, NotSetType]]
- marks: Collection[Union["MarkDecorator", "Mark"]]
- id: Optional[str]
-
- @classmethod
- def param(
- cls,
- *values: object,
- marks: Union["MarkDecorator", Collection[Union["MarkDecorator", "Mark"]]] = (),
- id: Optional[str] = None,
- ) -> "ParameterSet":
- if isinstance(marks, MarkDecorator):
- marks = (marks,)
- else:
- assert isinstance(marks, collections.abc.Collection)
-
- if id is not None:
- if not isinstance(id, str):
- raise TypeError(f"Expected id to be a string, got {type(id)}: {id!r}")
- id = ascii_escaped(id)
- return cls(values, marks, id)
-
- @classmethod
- def extract_from(
- cls,
- parameterset: Union["ParameterSet", Sequence[object], object],
- force_tuple: bool = False,
- ) -> "ParameterSet":
- """Extract from an object or objects.
-
- :param parameterset:
- A legacy style parameterset that may or may not be a tuple,
- and may or may not be wrapped into a mess of mark objects.
-
- :param force_tuple:
- Enforce tuple wrapping so single argument tuple values
- don't get decomposed and break tests.
- """
-
- if isinstance(parameterset, cls):
- return parameterset
- if force_tuple:
- return cls.param(parameterset)
- else:
- # TODO: Refactor to fix this type-ignore. Currently the following
- # passes type-checking but crashes:
- #
- # @pytest.mark.parametrize(('x', 'y'), [1, 2])
- # def test_foo(x, y): pass
- return cls(parameterset, marks=[], id=None) # type: ignore[arg-type]
-
- @staticmethod
- def _parse_parametrize_args(
- argnames: Union[str, List[str], Tuple[str, ...]],
- argvalues: Iterable[Union["ParameterSet", Sequence[object], object]],
- *args,
- **kwargs,
- ) -> Tuple[Union[List[str], Tuple[str, ...]], bool]:
- if not isinstance(argnames, (tuple, list)):
- argnames = [x.strip() for x in argnames.split(",") if x.strip()]
- force_tuple = len(argnames) == 1
- else:
- force_tuple = False
- return argnames, force_tuple
-
- @staticmethod
- def _parse_parametrize_parameters(
- argvalues: Iterable[Union["ParameterSet", Sequence[object], object]],
- force_tuple: bool,
- ) -> List["ParameterSet"]:
- return [
- ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues
- ]
-
- @classmethod
- def _for_parametrize(
- cls,
- argnames: Union[str, List[str], Tuple[str, ...]],
- argvalues: Iterable[Union["ParameterSet", Sequence[object], object]],
- func,
- config: Config,
- nodeid: str,
- ) -> Tuple[Union[List[str], Tuple[str, ...]], List["ParameterSet"]]:
- argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues)
- parameters = cls._parse_parametrize_parameters(argvalues, force_tuple)
- del argvalues
-
- if parameters:
- # Check all parameter sets have the correct number of values.
- for param in parameters:
- if len(param.values) != len(argnames):
- msg = (
- '{nodeid}: in "parametrize" the number of names ({names_len}):\n'
- " {names}\n"
- "must be equal to the number of values ({values_len}):\n"
- " {values}"
- )
- fail(
- msg.format(
- nodeid=nodeid,
- values=param.values,
- names=argnames,
- names_len=len(argnames),
- values_len=len(param.values),
- ),
- pytrace=False,
- )
- else:
- # Empty parameter set (likely computed at runtime): create a single
- # parameter set with NOTSET values, with the "empty parameter set" mark applied to it.
- mark = get_empty_parameterset_mark(config, argnames, func)
- parameters.append(
- ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
- )
- return argnames, parameters
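- # Illustrative note (editorial comment, not part of the original file):
- #     @pytest.mark.parametrize(("x", "y"), [(1, 2), (3, 4)])
- #     def test_add(x, y): ...
- # yields two ParameterSets of two values each; a value set like (3,) would be
- # rejected by the length check above, while an empty argvalues list falls
- # through to the single NOTSET-valued ParameterSet branch.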
-
-
-@final
-@attr.s(frozen=True, init=False, auto_attribs=True)
-class Mark:
- #: Name of the mark.
- name: str
- #: Positional arguments of the mark decorator.
- args: Tuple[Any, ...]
- #: Keyword arguments of the mark decorator.
- kwargs: Mapping[str, Any]
-
- #: Source Mark for ids with parametrize Marks.
- _param_ids_from: Optional["Mark"] = attr.ib(default=None, repr=False)
- #: Resolved/generated ids with parametrize Marks.
- _param_ids_generated: Optional[Sequence[str]] = attr.ib(default=None, repr=False)
-
- def __init__(
- self,
- name: str,
- args: Tuple[Any, ...],
- kwargs: Mapping[str, Any],
- param_ids_from: Optional["Mark"] = None,
- param_ids_generated: Optional[Sequence[str]] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- """:meta private:"""
- check_ispytest(_ispytest)
- # Weirdness to bypass frozen=True.
- object.__setattr__(self, "name", name)
- object.__setattr__(self, "args", args)
- object.__setattr__(self, "kwargs", kwargs)
- object.__setattr__(self, "_param_ids_from", param_ids_from)
- object.__setattr__(self, "_param_ids_generated", param_ids_generated)
-
- def _has_param_ids(self) -> bool:
- return "ids" in self.kwargs or len(self.args) >= 4
-
- def combined_with(self, other: "Mark") -> "Mark":
- """Return a new Mark which is a combination of this
- Mark and another Mark.
-
- Combines by appending args and merging kwargs.
-
- :param Mark other: The mark to combine with.
- :rtype: Mark
- """
- assert self.name == other.name
-
- # Remember source of ids with parametrize Marks.
- param_ids_from: Optional[Mark] = None
- if self.name == "parametrize":
- if other._has_param_ids():
- param_ids_from = other
- elif self._has_param_ids():
- param_ids_from = self
-
- return Mark(
- self.name,
- self.args + other.args,
- dict(self.kwargs, **other.kwargs),
- param_ids_from=param_ids_from,
- _ispytest=True,
- )
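- # Illustrative example (editorial comment, not part of the original file):
- #     m1 = pytest.mark.foo(1, a=1).mark
- #     m2 = pytest.mark.foo(2, b=2).mark
- #     m1.combined_with(m2)  # -> Mark("foo", (1, 2), {"a": 1, "b": 2})
- # For parametrize marks, whichever side carried ids= is remembered in
- # _param_ids_from.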
-
-
-# A generic parameter designating an object to which a Mark may
-# be applied -- a test function (callable) or class.
-# Note: a lambda is not allowed, but this can't be represented.
-Markable = TypeVar("Markable", bound=Union[Callable[..., object], type])
-
-
-@attr.s(init=False, auto_attribs=True)
-class MarkDecorator:
- """A decorator for applying a mark on test functions and classes.
-
- ``MarkDecorators`` are created with ``pytest.mark``::
-
- mark1 = pytest.mark.NAME # Simple MarkDecorator
- mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator
-
- and can then be applied as decorators to test functions::
-
- @mark2
- def test_function():
- pass
-
- When a ``MarkDecorator`` is called, it does the following:
-
- 1. If called with a single class as its only positional argument and no
- additional keyword arguments, it attaches the mark to the class so it
- gets applied automatically to all test cases found in that class.
-
- 2. If called with a single function as its only positional argument and
- no additional keyword arguments, it attaches the mark to the function,
- containing all the arguments already stored internally in the
- ``MarkDecorator``.
-
- 3. When called in any other case, it returns a new ``MarkDecorator``
- instance with the original ``MarkDecorator``'s content updated with
- the arguments passed to this call.
-
- Note: The rules above prevent a ``MarkDecorator`` from storing only a
- single function or class reference as its positional argument with no
- additional keyword or positional arguments. You can work around this by
- using `with_args()`.
- """
-
- mark: Mark
-
- def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None:
- """:meta private:"""
- check_ispytest(_ispytest)
- self.mark = mark
-
- @property
- def name(self) -> str:
- """Alias for mark.name."""
- return self.mark.name
-
- @property
- def args(self) -> Tuple[Any, ...]:
- """Alias for mark.args."""
- return self.mark.args
-
- @property
- def kwargs(self) -> Mapping[str, Any]:
- """Alias for mark.kwargs."""
- return self.mark.kwargs
-
- @property
- def markname(self) -> str:
- """:meta private:"""
- return self.name # for backward-compat (2.4.1 had this attr)
-
- def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator":
- """Return a MarkDecorator with extra arguments added.
-
- Unlike calling the MarkDecorator, with_args() can be used even
- if the sole argument is a callable/class.
- """
- mark = Mark(self.name, args, kwargs, _ispytest=True)
- return MarkDecorator(self.mark.combined_with(mark), _ispytest=True)
-
- # Type ignored because the overloads overlap with an incompatible
- # return type. Not much we can do about that. Thankfully mypy picks
- # the first match so it works out even if we break the rules.
- @overload
- def __call__(self, arg: Markable) -> Markable: # type: ignore[misc]
- pass
-
- @overload
- def __call__(self, *args: object, **kwargs: object) -> "MarkDecorator":
- pass
-
- def __call__(self, *args: object, **kwargs: object):
- """Call the MarkDecorator."""
- if args and not kwargs:
- func = args[0]
- is_class = inspect.isclass(func)
- if len(args) == 1 and (istestfunc(func) or is_class):
- store_mark(func, self.mark)
- return func
- return self.with_args(*args, **kwargs)
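- # Illustrative example (editorial comment, not part of the original file):
- #     deco = pytest.mark.slow          # bare MarkDecorator
- #     deco(test_func)                  # cases 1-2: attaches the mark, returns test_func
- #     deco("why", timeout=5)           # case 3: returns a new, parametrized MarkDecorator
- #     deco.with_args(some_callable)    # store a callable as an argument without applying the mark
- # (test_func/some_callable are placeholder names.)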
-
-
-def get_unpacked_marks(obj: object) -> Iterable[Mark]:
- """Obtain the unpacked marks that are stored on an object."""
- mark_list = getattr(obj, "pytestmark", [])
- if not isinstance(mark_list, list):
- mark_list = [mark_list]
- return normalize_mark_list(mark_list)
-
-
-def normalize_mark_list(
- mark_list: Iterable[Union[Mark, MarkDecorator]]
-) -> Iterable[Mark]:
- """
- Normalize an iterable of Mark or MarkDecorator objects into Mark objects
- by retrieving the `mark` attribute on MarkDecorator instances.
-
- :param mark_list: marks to normalize.
- :returns: An iterator of the extracted Mark objects.
- """
- for mark in mark_list:
- mark_obj = getattr(mark, "mark", mark)
- if not isinstance(mark_obj, Mark):
- raise TypeError(f"got {repr(mark_obj)} instead of Mark")
- yield mark_obj
-
-
-def store_mark(obj, mark: Mark) -> None:
- """Store a Mark on an object.
-
- This is used to implement the Mark declarations/decorators correctly.
- """
- assert isinstance(mark, Mark), mark
- # Always reassign name to avoid updating pytestmark in a reference that
- # was only borrowed.
- obj.pytestmark = [*get_unpacked_marks(obj), mark]
-
-
-# Typing for builtin pytest marks. This is cheating; it gives builtin marks
-# special privilege, and breaks modularity. But practicality beats purity...
-if TYPE_CHECKING:
- from _pytest.scope import _ScopeName
-
- class _SkipMarkDecorator(MarkDecorator):
- @overload # type: ignore[override,misc]
- def __call__(self, arg: Markable) -> Markable:
- ...
-
- @overload
- def __call__(self, reason: str = ...) -> "MarkDecorator":
- ...
-
- class _SkipifMarkDecorator(MarkDecorator):
- def __call__( # type: ignore[override]
- self,
- condition: Union[str, bool] = ...,
- *conditions: Union[str, bool],
- reason: str = ...,
- ) -> MarkDecorator:
- ...
-
- class _XfailMarkDecorator(MarkDecorator):
- @overload # type: ignore[override,misc]
- def __call__(self, arg: Markable) -> Markable:
- ...
-
- @overload
- def __call__(
- self,
- condition: Union[str, bool] = ...,
- *conditions: Union[str, bool],
- reason: str = ...,
- run: bool = ...,
- raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ...,
- strict: bool = ...,
- ) -> MarkDecorator:
- ...
-
- class _ParametrizeMarkDecorator(MarkDecorator):
- def __call__( # type: ignore[override]
- self,
- argnames: Union[str, List[str], Tuple[str, ...]],
- argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
- *,
- indirect: Union[bool, Sequence[str]] = ...,
- ids: Optional[
- Union[
- Iterable[Union[None, str, float, int, bool]],
- Callable[[Any], Optional[object]],
- ]
- ] = ...,
- scope: Optional[_ScopeName] = ...,
- ) -> MarkDecorator:
- ...
-
- class _UsefixturesMarkDecorator(MarkDecorator):
- def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override]
- ...
-
- class _FilterwarningsMarkDecorator(MarkDecorator):
- def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override]
- ...
-
-
-@final
-class MarkGenerator:
- """Factory for :class:`MarkDecorator` objects - exposed as
- a ``pytest.mark`` singleton instance.
-
- Example::
-
- import pytest
-
- @pytest.mark.slowtest
- def test_function():
- pass
-
- applies a 'slowtest' :class:`Mark` on ``test_function``.
- """
-
- # See TYPE_CHECKING above.
- if TYPE_CHECKING:
- skip: _SkipMarkDecorator
- skipif: _SkipifMarkDecorator
- xfail: _XfailMarkDecorator
- parametrize: _ParametrizeMarkDecorator
- usefixtures: _UsefixturesMarkDecorator
- filterwarnings: _FilterwarningsMarkDecorator
-
- def __init__(self, *, _ispytest: bool = False) -> None:
- check_ispytest(_ispytest)
- self._config: Optional[Config] = None
- self._markers: Set[str] = set()
-
- def __getattr__(self, name: str) -> MarkDecorator:
- """Generate a new :class:`MarkDecorator` with the given name."""
- if name[0] == "_":
- raise AttributeError("Marker name must NOT start with underscore")
-
- if self._config is not None:
- # We store a set of markers as a performance optimisation - if a mark
- # name is in the set we definitely know it, but a mark may be known and
- # not in the set. We therefore start by updating the set!
- if name not in self._markers:
- for line in self._config.getini("markers"):
- # example lines: "skipif(condition): skip the given test if..."
- # or "hypothesis: tests which use Hypothesis", so to get the
- # marker name we split on both `:` and `(`.
- if line == "ya:external":
- marker = line
- else:
- marker = line.split(":")[0].split("(")[0].strip()
- self._markers.add(marker)
-
- # If the name is not in the set of known marks after updating,
- # then it really is time to issue a warning or an error.
- if name not in self._markers:
- if self._config.option.strict_markers or self._config.option.strict:
- fail(
- f"{name!r} not found in `markers` configuration option",
- pytrace=False,
- )
-
- # Raise a specific error for common misspellings of "parametrize".
- if name in ["parameterize", "parametrise", "parameterise"]:
- __tracebackhide__ = True
- fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")
-
- warnings.warn(
- "Unknown pytest.mark.%s - is this a typo? You can register "
- "custom marks to avoid this warning - for details, see "
- "https://docs.pytest.org/en/stable/how-to/mark.html" % name,
- PytestUnknownMarkWarning,
- 2,
- )
-
- return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
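- # Illustrative note (editorial comment, not part of the original file): with
- # --strict-markers an unknown mark fails instead of warning; registering marks
- # in the config avoids both, e.g. in pytest.ini:
- #     [pytest]
- #     markers =
- #         slow: marks tests as slow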
-
-
-MARK_GEN = MarkGenerator(_ispytest=True)
-
-
-@final
-class NodeKeywords(MutableMapping[str, Any]):
- __slots__ = ("node", "parent", "_markers")
-
- def __init__(self, node: "Node") -> None:
- self.node = node
- self.parent = node.parent
- self._markers = {node.name: True}
-
- def __getitem__(self, key: str) -> Any:
- try:
- return self._markers[key]
- except KeyError:
- if self.parent is None:
- raise
- return self.parent.keywords[key]
-
- def __setitem__(self, key: str, value: Any) -> None:
- self._markers[key] = value
-
- # Note: we could've avoided explicitly implementing some of the methods
- # below and use the collections.abc fallback, but that would be slow.
-
- def __contains__(self, key: object) -> bool:
- return (
- key in self._markers
- or self.parent is not None
- and key in self.parent.keywords
- )
-
- def update( # type: ignore[override]
- self,
- other: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]] = (),
- **kwds: Any,
- ) -> None:
- self._markers.update(other)
- self._markers.update(kwds)
-
- def __delitem__(self, key: str) -> None:
- raise ValueError("cannot delete key in keywords dict")
-
- def __iter__(self) -> Iterator[str]:
- # Doesn't need to be fast.
- yield from self._markers
- if self.parent is not None:
- for keyword in self.parent.keywords:
- # self._markers and self.parent.keywords can have duplicates.
- if keyword not in self._markers:
- yield keyword
-
- def __len__(self) -> int:
- # Doesn't need to be fast.
- return sum(1 for keyword in self)
-
- def __repr__(self) -> str:
- return f"<NodeKeywords for node {self.node}>"
diff --git a/contrib/python/pytest/py3/_pytest/monkeypatch.py b/contrib/python/pytest/py3/_pytest/monkeypatch.py
deleted file mode 100644
index 91d590fb3d..0000000000
--- a/contrib/python/pytest/py3/_pytest/monkeypatch.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""Monkeypatching and mocking functionality."""
-import os
-import re
-import sys
-import warnings
-from contextlib import contextmanager
-from typing import Any
-from typing import Generator
-from typing import List
-from typing import MutableMapping
-from typing import Optional
-from typing import overload
-from typing import Tuple
-from typing import TypeVar
-from typing import Union
-
-from _pytest.compat import final
-from _pytest.fixtures import fixture
-from _pytest.warning_types import PytestWarning
-
-RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
-
-
-K = TypeVar("K")
-V = TypeVar("V")
-
-
-@fixture
-def monkeypatch() -> Generator["MonkeyPatch", None, None]:
- """A convenient fixture for monkey-patching.
-
- The fixture provides these methods to modify objects, dictionaries or
- os.environ::
-
- monkeypatch.setattr(obj, name, value, raising=True)
- monkeypatch.delattr(obj, name, raising=True)
- monkeypatch.setitem(mapping, name, value)
- monkeypatch.delitem(obj, name, raising=True)
- monkeypatch.setenv(name, value, prepend=None)
- monkeypatch.delenv(name, raising=True)
- monkeypatch.syspath_prepend(path)
- monkeypatch.chdir(path)
-
- All modifications will be undone after the requesting test function or
- fixture has finished. The ``raising`` parameter determines if a KeyError
- or AttributeError will be raised if the set/deletion operation has no target.
- """
- mpatch = MonkeyPatch()
- yield mpatch
- mpatch.undo()
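-# Illustrative usage of the fixture (editorial comment, not part of the original file):
-#     def test_home(monkeypatch):
-#         monkeypatch.setenv("HOME", "/tmp/home")
-#         monkeypatch.setattr("os.getcwd", lambda: "/")
-# Every modification is undone automatically once the requesting test finishes.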
-
-
-def resolve(name: str) -> object:
- # Simplified from zope.dottedname.
- parts = name.split(".")
-
- used = parts.pop(0)
- found: object = __import__(used)
- for part in parts:
- used += "." + part
- try:
- found = getattr(found, part)
- except AttributeError:
- pass
- else:
- continue
- # We use explicit un-nesting of the handling block in order
- # to avoid nested exceptions.
- try:
- __import__(used)
- except ImportError as ex:
- expected = str(ex).split()[-1]
- if expected == used:
- raise
- else:
- raise ImportError(f"import error in {used}: {ex}") from ex
- found = annotated_getattr(found, part, used)
- return found
-
-
-def annotated_getattr(obj: object, name: str, ann: str) -> object:
- try:
- obj = getattr(obj, name)
- except AttributeError as e:
- raise AttributeError(
- "{!r} object at {} has no attribute {!r}".format(
- type(obj).__name__, ann, name
- )
- ) from e
- return obj
-
-
-def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
- if not isinstance(import_path, str) or "." not in import_path:
- raise TypeError(f"must be absolute import path string, not {import_path!r}")
- module, attr = import_path.rsplit(".", 1)
- target = resolve(module)
- if raising:
- annotated_getattr(target, attr, ann=module)
- return attr, target
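-# Illustrative example (editorial comment, not part of the original file):
-#     derive_importpath("os.path.join", raising=True)
-# resolves "os.path", verifies it has a "join" attribute (because raising=True),
-# and returns ("join", <module os.path>).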
-
-
-class Notset:
- def __repr__(self) -> str:
- return "<notset>"
-
-
-notset = Notset()
-
-
-@final
-class MonkeyPatch:
- """Helper to conveniently monkeypatch attributes/items/environment
- variables/syspath.
-
- Returned by the :fixture:`monkeypatch` fixture.
-
- .. versionchanged:: 6.2
- Can now also be used directly as `pytest.MonkeyPatch()`, for when
- the fixture is not available. In this case, use
- :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
- :meth:`undo` explicitly.
- """
-
- def __init__(self) -> None:
- self._setattr: List[Tuple[object, str, object]] = []
- self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = []
- self._cwd: Optional[str] = None
- self._savesyspath: Optional[List[str]] = None
-
- @classmethod
- @contextmanager
- def context(cls) -> Generator["MonkeyPatch", None, None]:
- """Context manager that returns a new :class:`MonkeyPatch` object
- which undoes any patching done inside the ``with`` block upon exit.
-
- Example:
-
- .. code-block:: python
-
- import functools
-
-
- def test_partial(monkeypatch):
- with monkeypatch.context() as m:
- m.setattr(functools, "partial", 3)
-
- Useful in situations where it is desired to undo some patches before the test ends,
- such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
- of this see :issue:`3290`).
- """
- m = cls()
- try:
- yield m
- finally:
- m.undo()
-
- @overload
- def setattr(
- self,
- target: str,
- name: object,
- value: Notset = ...,
- raising: bool = ...,
- ) -> None:
- ...
-
- @overload
- def setattr(
- self,
- target: object,
- name: str,
- value: object,
- raising: bool = ...,
- ) -> None:
- ...
-
- def setattr(
- self,
- target: Union[str, object],
- name: Union[object, str],
- value: object = notset,
- raising: bool = True,
- ) -> None:
- """Set attribute value on target, memorizing the old value.
-
- For convenience you can specify a string as ``target`` which
- will be interpreted as a dotted import path, with the last part
- being the attribute name. For example,
- ``monkeypatch.setattr("os.getcwd", lambda: "/")``
- would set the ``getcwd`` function of the ``os`` module.
-
- Raises AttributeError if the attribute does not exist, unless
- ``raising`` is set to False.
- """
- __tracebackhide__ = True
- import inspect
-
- if isinstance(value, Notset):
- if not isinstance(target, str):
- raise TypeError(
- "use setattr(target, name, value) or "
- "setattr(target, value) with target being a dotted "
- "import string"
- )
- value = name
- name, target = derive_importpath(target, raising)
- else:
- if not isinstance(name, str):
- raise TypeError(
- "use setattr(target, name, value) with name being a string or "
- "setattr(target, value) with target being a dotted "
- "import string"
- )
-
- oldval = getattr(target, name, notset)
- if raising and oldval is notset:
- raise AttributeError(f"{target!r} has no attribute {name!r}")
-
- # avoid class descriptors like staticmethod/classmethod
- if inspect.isclass(target):
- oldval = target.__dict__.get(name, notset)
- self._setattr.append((target, name, oldval))
- setattr(target, name, value)
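- # Illustrative usage (editorial comment, not part of the original file):
- #     monkeypatch.setattr(os, "getcwd", lambda: "/")    # object + name form
- #     monkeypatch.setattr("os.getcwd", lambda: "/")     # dotted-path form
- # Both record the old attribute so undo() can restore it afterwards.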
-
- def delattr(
- self,
- target: Union[object, str],
- name: Union[str, Notset] = notset,
- raising: bool = True,
- ) -> None:
- """Delete attribute ``name`` from ``target``.
-
- If no ``name`` is specified and ``target`` is a string
- it will be interpreted as a dotted import path with the
- last part being the attribute name.
-
- Raises AttributeError if the attribute does not exist, unless
- ``raising`` is set to False.
- """
- __tracebackhide__ = True
- import inspect
-
- if isinstance(name, Notset):
- if not isinstance(target, str):
- raise TypeError(
- "use delattr(target, name) or "
- "delattr(target) with target being a dotted "
- "import string"
- )
- name, target = derive_importpath(target, raising)
-
- if not hasattr(target, name):
- if raising:
- raise AttributeError(name)
- else:
- oldval = getattr(target, name, notset)
- # Avoid class descriptors like staticmethod/classmethod.
- if inspect.isclass(target):
- oldval = target.__dict__.get(name, notset)
- self._setattr.append((target, name, oldval))
- delattr(target, name)
-
- def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None:
- """Set dictionary entry ``name`` to value."""
- self._setitem.append((dic, name, dic.get(name, notset)))
- dic[name] = value
-
- def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None:
- """Delete ``name`` from dict.
-
- Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
- False.
- """
- if name not in dic:
- if raising:
- raise KeyError(name)
- else:
- self._setitem.append((dic, name, dic.get(name, notset)))
- del dic[name]
-
- def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None:
- """Set environment variable ``name`` to ``value``.
-
- If ``prepend`` is a character, read the current environment variable
- value and prepend ``value`` to it, joined by the ``prepend``
- character.
- """
- if not isinstance(value, str):
- warnings.warn( # type: ignore[unreachable]
- PytestWarning(
- "Value of environment variable {name} type should be str, but got "
- "{value!r} (type: {type}); converted to str implicitly".format(
- name=name, value=value, type=type(value).__name__
- )
- ),
- stacklevel=2,
- )
- value = str(value)
- if prepend and name in os.environ:
- value = value + prepend + os.environ[name]
- self.setitem(os.environ, name, value)
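- # Illustrative usage (editorial comment, not part of the original file):
- #     monkeypatch.setenv("PATH", "/opt/tool/bin", prepend=os.pathsep)
- # leaves os.environ["PATH"] as "/opt/tool/bin" + os.pathsep + <old PATH>
- # when PATH was already set.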
-
- def delenv(self, name: str, raising: bool = True) -> None:
- """Delete ``name`` from the environment.
-
- Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
- False.
- """
- environ: MutableMapping[str, str] = os.environ
- self.delitem(environ, name, raising=raising)
-
- def syspath_prepend(self, path) -> None:
- """Prepend ``path`` to ``sys.path`` list of import locations."""
-
- if self._savesyspath is None:
- self._savesyspath = sys.path[:]
- sys.path.insert(0, str(path))
-
- # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
- # this is only needed when pkg_resources was already loaded by the namespace package
- if "pkg_resources" in sys.modules:
- from pkg_resources import fixup_namespace_packages
-
- fixup_namespace_packages(str(path))
-
- # A call to syspathinsert() usually means that the caller wants to
- # import some dynamically created files, thus with python3 we
- # invalidate its import caches.
- # This is especially important when any namespace package is in use,
- # since then the mtime based FileFinder cache (that gets created in
- # this case already) gets not invalidated when writing the new files
- # quickly afterwards.
- from importlib import invalidate_caches
-
- invalidate_caches()
-
- def chdir(self, path: Union[str, "os.PathLike[str]"]) -> None:
- """Change the current working directory to the specified path.
-
- Path can be a string or a path object.
- """
- if self._cwd is None:
- self._cwd = os.getcwd()
- os.chdir(path)
-
- def undo(self) -> None:
- """Undo previous changes.
-
- This call consumes the undo stack. Calling it a second time has no
- effect unless you do more monkeypatching after the undo call.
-
- There is generally no need to call `undo()`, since it is
- called automatically during tear-down.
-
- Note that the same `monkeypatch` fixture is used across a
- single test function invocation. If `monkeypatch` is used both by
- the test function itself and one of the test fixtures,
- calling `undo()` will undo all of the changes made in
- both functions.
- """
- for obj, name, value in reversed(self._setattr):
- if value is not notset:
- setattr(obj, name, value)
- else:
- delattr(obj, name)
- self._setattr[:] = []
- for dictionary, key, value in reversed(self._setitem):
- if value is notset:
- try:
- del dictionary[key]
- except KeyError:
- pass # Was already deleted, so we have the desired state.
- else:
- dictionary[key] = value
- self._setitem[:] = []
- if self._savesyspath is not None:
- sys.path[:] = self._savesyspath
- self._savesyspath = None
-
- if self._cwd is not None:
- os.chdir(self._cwd)
- self._cwd = None
diff --git a/contrib/python/pytest/py3/_pytest/nodes.py b/contrib/python/pytest/py3/_pytest/nodes.py
deleted file mode 100644
index 1a168043a4..0000000000
--- a/contrib/python/pytest/py3/_pytest/nodes.py
+++ /dev/null
@@ -1,762 +0,0 @@
-import os
-import warnings
-from inspect import signature
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import MutableMapping
-from typing import Optional
-from typing import overload
-from typing import Set
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import _pytest._code
-from _pytest._code import getfslineno
-from _pytest._code.code import ExceptionInfo
-from _pytest._code.code import TerminalRepr
-from _pytest.compat import cached_property
-from _pytest.compat import LEGACY_PATH
-from _pytest.config import Config
-from _pytest.config import ConftestImportFailure
-from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH
-from _pytest.deprecated import NODE_CTOR_FSPATH_ARG
-from _pytest.mark.structures import Mark
-from _pytest.mark.structures import MarkDecorator
-from _pytest.mark.structures import NodeKeywords
-from _pytest.outcomes import fail
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import commonpath
-from _pytest.stash import Stash
-from _pytest.warning_types import PytestWarning
-
-if TYPE_CHECKING:
- # Imported here due to circular import.
- from _pytest.main import Session
- from _pytest._code.code import _TracebackStyle
-
-
-SEP = "/"
-
-tracebackcutdir = Path(_pytest.__file__).parent
-
-
-def iterparentnodeids(nodeid: str) -> Iterator[str]:
- """Return the parent node IDs of a given node ID, inclusive.
-
- For the node ID
-
- "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
-
- the result would be
-
- ""
- "testing"
- "testing/code"
- "testing/code/test_excinfo.py"
- "testing/code/test_excinfo.py::TestFormattedExcinfo"
- "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
-
- Note that / components are only considered until the first ::.
- """
- pos = 0
- first_colons: Optional[int] = nodeid.find("::")
- if first_colons == -1:
- first_colons = None
- # The root Session node - always present.
- yield ""
- # Eagerly consume SEP parts until first colons.
- while True:
- at = nodeid.find(SEP, pos, first_colons)
- if at == -1:
- break
- if at > 0:
- yield nodeid[:at]
- pos = at + len(SEP)
- # Eagerly consume :: parts.
- while True:
- at = nodeid.find("::", pos)
- if at == -1:
- break
- if at > 0:
- yield nodeid[:at]
- pos = at + len("::")
- # The node ID itself.
- if nodeid:
- yield nodeid
-
-
-def _check_path(path: Path, fspath: LEGACY_PATH) -> None:
- if Path(fspath) != path:
- raise ValueError(
- f"Path({fspath!r}) != {path!r}\n"
- "if both path and fspath are given they need to be equal"
- )
-
-
-def _imply_path(
- node_type: Type["Node"],
- path: Optional[Path],
- fspath: Optional[LEGACY_PATH],
-) -> Path:
- if fspath is not None:
- warnings.warn(
- NODE_CTOR_FSPATH_ARG.format(
- node_type_name=node_type.__name__,
- ),
- stacklevel=6,
- )
- if path is not None:
- if fspath is not None:
- _check_path(path, fspath)
- return path
- else:
- assert fspath is not None
- return Path(fspath)
-
-
-_NodeType = TypeVar("_NodeType", bound="Node")
-
-
-class NodeMeta(type):
- def __call__(self, *k, **kw):
- msg = (
- "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n"
- "See "
- "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent"
- " for more details."
- ).format(name=f"{self.__module__}.{self.__name__}")
- fail(msg, pytrace=False)
-
- def _create(self, *k, **kw):
- try:
- return super().__call__(*k, **kw)
- except TypeError:
- sig = signature(getattr(self, "__init__"))
- known_kw = {k: v for k, v in kw.items() if k in sig.parameters}
- from .warning_types import PytestDeprecationWarning
-
- warnings.warn(
- PytestDeprecationWarning(
- f"{self} is not using a cooperative constructor and only takes {set(known_kw)}.\n"
- "See https://docs.pytest.org/en/stable/deprecations.html"
- "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs "
- "for more details."
- )
- )
-
- return super().__call__(*k, **known_kw)
-
-
-class Node(metaclass=NodeMeta):
- """Base class for Collector and Item, the components of the test
- collection tree.
-
- Collector subclasses have children; Items are leaf nodes.
- """
-
- # Implemented in the legacypath plugin.
- #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage
- #: for methods not migrated to ``pathlib.Path`` yet, such as
- #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer
- #: using :attr:`path` instead.
- fspath: LEGACY_PATH
-
- # Use __slots__ to make attribute access faster.
- # Note that __dict__ is still available.
- __slots__ = (
- "name",
- "parent",
- "config",
- "session",
- "path",
- "_nodeid",
- "_store",
- "__dict__",
- )
-
- def __init__(
- self,
- name: str,
- parent: "Optional[Node]" = None,
- config: Optional[Config] = None,
- session: "Optional[Session]" = None,
- fspath: Optional[LEGACY_PATH] = None,
- path: Optional[Path] = None,
- nodeid: Optional[str] = None,
- ) -> None:
- #: A unique name within the scope of the parent node.
- self.name = name
-
- #: The parent collector node.
- self.parent = parent
-
- if config:
- #: The pytest config object.
- self.config: Config = config
- else:
- if not parent:
- raise TypeError("config or parent must be provided")
- self.config = parent.config
-
- if session:
- #: The pytest session this node is part of.
- self.session = session
- else:
- if not parent:
- raise TypeError("session or parent must be provided")
- self.session = parent.session
-
- if path is None and fspath is None:
- path = getattr(parent, "path", None)
- #: Filesystem path where this node was collected from (can be None).
- self.path: Path = _imply_path(type(self), path, fspath=fspath)
-
- # The explicit annotation is to avoid publicly exposing NodeKeywords.
- #: Keywords/markers collected from all scopes.
- self.keywords: MutableMapping[str, Any] = NodeKeywords(self)
-
- #: The marker objects belonging to this node.
- self.own_markers: List[Mark] = []
-
- #: Allow adding of extra keywords to use for matching.
- self.extra_keyword_matches: Set[str] = set()
-
- if nodeid is not None:
- assert "::()" not in nodeid
- self._nodeid = nodeid
- else:
- if not self.parent:
- raise TypeError("nodeid or parent must be provided")
- self._nodeid = self.parent.nodeid + "::" + self.name
-
- #: A place where plugins can store information on the node for their
- #: own use.
- #:
- #: :type: Stash
- self.stash = Stash()
- # Deprecated alias. Was never public. Can be removed in a few releases.
- self._store = self.stash
-
- @classmethod
- def from_parent(cls, parent: "Node", **kw):
- """Public constructor for Nodes.
-
- This indirection got introduced in order to enable removing
- the fragile logic from the node constructors.
-
- Subclasses can use ``super().from_parent(...)`` when overriding the
- construction.
-
- :param parent: The parent node of this Node.
- """
- if "config" in kw:
- raise TypeError("config is not a valid argument for from_parent")
- if "session" in kw:
- raise TypeError("session is not a valid argument for from_parent")
- return cls._create(parent=parent, **kw)
-
- @property
- def ihook(self):
- """fspath-sensitive hook proxy used to call pytest hooks."""
- return self.session.gethookproxy(self.path)
-
- def __repr__(self) -> str:
- return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
-
- def warn(self, warning: Warning) -> None:
- """Issue a warning for this Node.
-
- Warnings will be displayed after the test session, unless explicitly suppressed.
-
- :param Warning warning:
- The warning instance to issue.
-
- :raises ValueError: If ``warning`` instance is not a subclass of Warning.
-
- Example usage:
-
- .. code-block:: python
-
- node.warn(PytestWarning("some message"))
- node.warn(UserWarning("some message"))
-
- .. versionchanged:: 6.2
- Any subclass of :class:`Warning` is now accepted, rather than only
- :class:`PytestWarning <pytest.PytestWarning>` subclasses.
- """
- # enforce type checks here to avoid getting a generic type error later otherwise.
- if not isinstance(warning, Warning):
- raise ValueError(
- "warning must be an instance of Warning or subclass, got {!r}".format(
- warning
- )
- )
- path, lineno = get_fslocation_from_item(self)
- assert lineno is not None
- warnings.warn_explicit(
- warning,
- category=None,
- filename=str(path),
- lineno=lineno + 1,
- )
-
- # Methods for ordering nodes.
-
- @property
- def nodeid(self) -> str:
- """A ::-separated string denoting its collection tree address."""
- return self._nodeid
-
- def __hash__(self) -> int:
- return hash(self._nodeid)
-
- def setup(self) -> None:
- pass
-
- def teardown(self) -> None:
- pass
-
- def listchain(self) -> List["Node"]:
- """Return list of all parent collectors up to self, starting from
- the root of collection tree."""
- chain = []
- item: Optional[Node] = self
- while item is not None:
- chain.append(item)
- item = item.parent
- chain.reverse()
- return chain
-
- def add_marker(
- self, marker: Union[str, MarkDecorator], append: bool = True
- ) -> None:
- """Dynamically add a marker object to the node.
-
- :param append:
- Whether to append the marker, or prepend it.
- """
- from _pytest.mark import MARK_GEN
-
- if isinstance(marker, MarkDecorator):
- marker_ = marker
- elif isinstance(marker, str):
- marker_ = getattr(MARK_GEN, marker)
- else:
- raise ValueError("is not a string or pytest.mark.* Marker")
- self.keywords[marker_.name] = marker_
- if append:
- self.own_markers.append(marker_.mark)
- else:
- self.own_markers.insert(0, marker_.mark)
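- # Illustrative usage (editorial comment, not part of the original file),
- # e.g. from a hook or fixture that has an item at hand:
- #     item.add_marker("slow")
- #     item.add_marker(pytest.mark.xfail(reason="known flaky"), append=False)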
-
- def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]:
- """Iterate over all markers of the node.
-
- :param name: If given, filter the results by the name attribute.
- """
- return (x[1] for x in self.iter_markers_with_node(name=name))
-
- def iter_markers_with_node(
- self, name: Optional[str] = None
- ) -> Iterator[Tuple["Node", Mark]]:
- """Iterate over all markers of the node.
-
- :param name: If given, filter the results by the name attribute.
- :returns: An iterator of (node, mark) tuples.
- """
- for node in reversed(self.listchain()):
- for mark in node.own_markers:
- if name is None or getattr(mark, "name", None) == name:
- yield node, mark
-
- @overload
- def get_closest_marker(self, name: str) -> Optional[Mark]:
- ...
-
- @overload
- def get_closest_marker(self, name: str, default: Mark) -> Mark:
- ...
-
- def get_closest_marker(
- self, name: str, default: Optional[Mark] = None
- ) -> Optional[Mark]:
- """Return the first marker matching the name, from closest (for
- example function) to farther level (for example module level).
-
- :param default: Fallback return value if no marker was found.
- :param name: Name to filter by.
- """
- return next(self.iter_markers(name=name), default)
-
- def listextrakeywords(self) -> Set[str]:
- """Return a set of all extra keywords in self and any parents."""
- extra_keywords: Set[str] = set()
- for item in self.listchain():
- extra_keywords.update(item.extra_keyword_matches)
- return extra_keywords
-
- def listnames(self) -> List[str]:
- return [x.name for x in self.listchain()]
-
- def addfinalizer(self, fin: Callable[[], object]) -> None:
- """Register a function to be called when this node is finalized.
-
- This method can only be called when this node is active
- in a setup chain, for example during self.setup().
- """
- self.session._setupstate.addfinalizer(fin, self)
-
- def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]:
- """Get the next parent node (including self) which is an instance of
- the given class."""
- current: Optional[Node] = self
- while current and not isinstance(current, cls):
- current = current.parent
- assert current is None or isinstance(current, cls)
- return current
-
- def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None:
- pass
-
- def _repr_failure_py(
- self,
- excinfo: ExceptionInfo[BaseException],
- style: "Optional[_TracebackStyle]" = None,
- ) -> TerminalRepr:
- from _pytest.fixtures import FixtureLookupError
-
- if isinstance(excinfo.value, ConftestImportFailure):
- excinfo = ExceptionInfo.from_exc_info(excinfo.value.excinfo)
- if isinstance(excinfo.value, fail.Exception):
- if not excinfo.value.pytrace:
- style = "value"
- if isinstance(excinfo.value, FixtureLookupError):
- return excinfo.value.formatrepr()
- if self.config.getoption("fulltrace", False):
- style = "long"
- else:
- tb = _pytest._code.Traceback([excinfo.traceback[-1]])
- self._prunetraceback(excinfo)
- if len(excinfo.traceback) == 0:
- excinfo.traceback = tb
- if style == "auto":
- style = "long"
- # XXX should excinfo.getrepr record all data and toterminal() process it?
- if style is None:
- if self.config.getoption("tbstyle", "auto") == "short":
- style = "short"
- else:
- style = "long"
-
- if self.config.getoption("verbose", 0) > 1:
- truncate_locals = False
- else:
- truncate_locals = True
-
- # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.
- # It is possible for a fixture/test to change the CWD while this code runs, which
- # would then result in the user seeing confusing paths in the failure message.
- # To fix this, if the CWD changed, always display the full absolute path.
- # It will be better to just always display paths relative to invocation_dir, but
- # this requires a lot of plumbing (#6428).
- try:
- abspath = Path(os.getcwd()) != self.config.invocation_params.dir
- except OSError:
- abspath = True
-
- return excinfo.getrepr(
- funcargs=True,
- abspath=abspath,
- showlocals=self.config.getoption("showlocals", False),
- style=style,
- tbfilter=False, # pruned already, or in --fulltrace mode.
- truncate_locals=truncate_locals,
- )
-
- def repr_failure(
- self,
- excinfo: ExceptionInfo[BaseException],
- style: "Optional[_TracebackStyle]" = None,
- ) -> Union[str, TerminalRepr]:
- """Return a representation of a collection or test failure.
-
- .. seealso:: :ref:`non-python tests`
-
- :param excinfo: Exception information for the failure.
- """
- return self._repr_failure_py(excinfo, style)
-
-
-def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[int]]:
- """Try to extract the actual location from a node, depending on available attributes:
-
- * "location": a pair (path, lineno)
- * "obj": a Python object that the node wraps.
- * "fspath": just a path
-
- :rtype: A tuple of (str|Path, int) with filename and line number.
- """
- # See Item.location.
- location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None)
- if location is not None:
- return location[:2]
- obj = getattr(node, "obj", None)
- if obj is not None:
- return getfslineno(obj)
- return getattr(node, "fspath", "unknown location"), -1
-
-
-class Collector(Node):
- """Collector instances create children through collect() and thus
- iteratively build a tree."""
-
- class CollectError(Exception):
- """An error during collection, contains a custom message."""
-
- def collect(self) -> Iterable[Union["Item", "Collector"]]:
- """Return a list of children (items and collectors) for this
- collection node."""
- raise NotImplementedError("abstract")
-
- # TODO: This omits the style= parameter which breaks Liskov Substitution.
- def repr_failure( # type: ignore[override]
- self, excinfo: ExceptionInfo[BaseException]
- ) -> Union[str, TerminalRepr]:
- """Return a representation of a collection failure.
-
- :param excinfo: Exception information for the failure.
- """
- if isinstance(excinfo.value, self.CollectError) and not self.config.getoption(
- "fulltrace", False
- ):
- exc = excinfo.value
- return str(exc.args[0])
-
- # Respect explicit tbstyle option, but default to "short"
- # (_repr_failure_py uses "long" with "fulltrace" option always).
- tbstyle = self.config.getoption("tbstyle", "auto")
- if tbstyle == "auto":
- tbstyle = "short"
-
- return self._repr_failure_py(excinfo, style=tbstyle)
-
- def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None:
- if hasattr(self, "path"):
- traceback = excinfo.traceback
- ntraceback = traceback.cut(path=self.path)
- if ntraceback == traceback:
- ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
- excinfo.traceback = ntraceback.filter()
-
-
-def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[str]:
- for initial_path in session._initialpaths:
- if commonpath(path, initial_path) == initial_path:
- rel = str(path.relative_to(initial_path))
- return "" if rel == "." else rel
- return None
-
-
-class FSCollector(Collector):
- def __init__(
- self,
- fspath: Optional[LEGACY_PATH] = None,
- path_or_parent: Optional[Union[Path, Node]] = None,
- path: Optional[Path] = None,
- name: Optional[str] = None,
- parent: Optional[Node] = None,
- config: Optional[Config] = None,
- session: Optional["Session"] = None,
- nodeid: Optional[str] = None,
- ) -> None:
- if path_or_parent:
- if isinstance(path_or_parent, Node):
- assert parent is None
- parent = cast(FSCollector, path_or_parent)
- elif isinstance(path_or_parent, Path):
- assert path is None
- path = path_or_parent
-
- path = _imply_path(type(self), path, fspath=fspath)
- if name is None:
- name = path.name
- if parent is not None and parent.path != path:
- try:
- rel = path.relative_to(parent.path)
- except ValueError:
- pass
- else:
- name = str(rel)
- name = name.replace(os.sep, SEP)
- self.path = path
-
- if session is None:
- assert parent is not None
- session = parent.session
-
- if nodeid is None:
- try:
- nodeid = str(self.path.relative_to(session.config.rootpath))
- except ValueError:
- nodeid = _check_initialpaths_for_relpath(session, path)
-
- if nodeid and os.sep != SEP:
- nodeid = nodeid.replace(os.sep, SEP)
-
- super().__init__(
- name=name,
- parent=parent,
- config=config,
- session=session,
- nodeid=nodeid,
- path=path,
- )
-
- @classmethod
- def from_parent(
- cls,
- parent,
- *,
- fspath: Optional[LEGACY_PATH] = None,
- path: Optional[Path] = None,
- **kw,
- ):
- """The public constructor."""
- return super().from_parent(parent=parent, fspath=fspath, path=path, **kw)
-
- def gethookproxy(self, fspath: "os.PathLike[str]"):
- warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
- return self.session.gethookproxy(fspath)
-
- def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
- warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
- return self.session.isinitpath(path)
-
-
-class File(FSCollector):
- """Base class for collecting tests from a file.
-
- :ref:`non-python tests`.
- """
-
-
-class Item(Node):
- """A basic test invocation item.
-
- Note that for a single function there might be multiple test invocation items.
- """
-
- nextitem = None
-
- def __init__(
- self,
- name,
- parent=None,
- config: Optional[Config] = None,
- session: Optional["Session"] = None,
- nodeid: Optional[str] = None,
- **kw,
- ) -> None:
- # The first two arguments are intentionally passed positionally,
- # to keep plugins who define a node type which inherits from
- # (pytest.Item, pytest.File) working (see issue #8435).
- # They can be made kwargs when the deprecation above is done.
- super().__init__(
- name,
- parent,
- config=config,
- session=session,
- nodeid=nodeid,
- **kw,
- )
- self._report_sections: List[Tuple[str, str, str]] = []
-
- #: A list of tuples (name, value) that holds user defined properties
- #: for this test.
- self.user_properties: List[Tuple[str, object]] = []
-
- self._check_item_and_collector_diamond_inheritance()
-
- def _check_item_and_collector_diamond_inheritance(self) -> None:
- """
- Check if the current type inherits from both File and Collector
- at the same time, emitting a warning accordingly (#8447).
- """
- cls = type(self)
-
- # We inject an attribute in the type to avoid issuing this warning
- # for the same class more than once, which is not helpful.
- # It is a hack, but was deemed acceptable in order to avoid
- # flooding the user in the common case.
- attr_name = "_pytest_diamond_inheritance_warning_shown"
- if getattr(cls, attr_name, False):
- return
- setattr(cls, attr_name, True)
-
- problems = ", ".join(
- base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
- )
- if problems:
- warnings.warn(
- f"{cls.__name__} is an Item subclass and should not be a collector, "
- f"however its bases {problems} are collectors.\n"
- "Please split the Collectors and the Item into separate node types.\n"
- "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
- "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
- PytestWarning,
- )
-
- def runtest(self) -> None:
- """Run the test case for this item.
-
- Must be implemented by subclasses.
-
- .. seealso:: :ref:`non-python tests`
- """
- raise NotImplementedError("runtest must be implemented by Item subclass")
-
- def add_report_section(self, when: str, key: str, content: str) -> None:
- """Add a new report section, similar to what's done internally to add
- stdout and stderr captured output::
-
- item.add_report_section("call", "stdout", "report section contents")
-
- :param str when:
- One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
- :param str key:
- Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
- ``"stderr"`` internally.
- :param str content:
- The full contents as a string.
- """
- if content:
- self._report_sections.append((when, key, content))
-
- def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
- """Get location information for this item for test reports.
-
- Returns a tuple with three elements:
-
- - The path of the test (default ``self.path``)
- - The line number of the test (default ``None``)
- - A name of the test to be shown (default ``""``)
-
- .. seealso:: :ref:`non-python tests`
- """
- return self.path, None, ""
-
- @cached_property
- def location(self) -> Tuple[str, Optional[int], str]:
- location = self.reportinfo()
- path = absolutepath(os.fspath(location[0]))
- relfspath = self.session._node_location_to_relpath(path)
- assert type(location[2]) is str
- return (relfspath, location[1], location[2])
diff --git a/contrib/python/pytest/py3/_pytest/nose.py b/contrib/python/pytest/py3/_pytest/nose.py
deleted file mode 100644
index b0699d22bd..0000000000
--- a/contrib/python/pytest/py3/_pytest/nose.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Run testsuites written for nose."""
-from _pytest.config import hookimpl
-from _pytest.fixtures import getfixturemarker
-from _pytest.nodes import Item
-from _pytest.python import Function
-from _pytest.unittest import TestCaseFunction
-
-
-@hookimpl(trylast=True)
-def pytest_runtest_setup(item: Item) -> None:
- if not isinstance(item, Function):
- return
- # Don't do nose style setup/teardown on direct unittest style classes.
- if isinstance(item, TestCaseFunction):
- return
-
- # Capture the narrowed type of item for the teardown closure,
- # see https://github.com/python/mypy/issues/2608
- func = item
-
- call_optional(func.obj, "setup")
- func.addfinalizer(lambda: call_optional(func.obj, "teardown"))
-
- # NOTE: Module- and class-level fixtures are handled in python.py
- # with `pluginmanager.has_plugin("nose")` checks.
- # It would have been nicer to implement them outside of core, but
- # it's not straightforward.
-
-
-def call_optional(obj: object, name: str) -> bool:
- method = getattr(obj, name, None)
- if method is None:
- return False
- is_fixture = getfixturemarker(method) is not None
- if is_fixture:
- return False
- if not callable(method):
- return False
- # If there are any problems allow the exception to raise rather than
- # silently ignoring it.
- method()
- return True
diff --git a/contrib/python/pytest/py3/_pytest/outcomes.py b/contrib/python/pytest/py3/_pytest/outcomes.py
deleted file mode 100644
index 25206fe0e8..0000000000
--- a/contrib/python/pytest/py3/_pytest/outcomes.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""Exception classes and constants handling test outcomes as well as
-functions creating them."""
-import sys
-import warnings
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Optional
-from typing import Type
-from typing import TypeVar
-
-from _pytest.deprecated import KEYWORD_MSG_ARG
-
-TYPE_CHECKING = False # Avoid circular import through compat.
-
-if TYPE_CHECKING:
- from typing import NoReturn
- from typing_extensions import Protocol
-else:
- # typing.Protocol is only available starting from Python 3.8. It is also
- # available from typing_extensions, but we don't want a runtime dependency
- # on that. So use a dummy runtime implementation.
- from typing import Generic
-
- Protocol = Generic
-
-
-class OutcomeException(BaseException):
- """OutcomeException and its subclass instances indicate and contain info
- about test and collection outcomes."""
-
- def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None:
- if msg is not None and not isinstance(msg, str):
- error_msg = ( # type: ignore[unreachable]
- "{} expected string as 'msg' parameter, got '{}' instead.\n"
- "Perhaps you meant to use a mark?"
- )
- raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__))
- super().__init__(msg)
- self.msg = msg
- self.pytrace = pytrace
-
- def __repr__(self) -> str:
- if self.msg is not None:
- return self.msg
- return f"<{self.__class__.__name__} instance>"
-
- __str__ = __repr__
-
-
-TEST_OUTCOME = (OutcomeException, Exception)
-
-
-class Skipped(OutcomeException):
- # XXX hackish: on Python 3 we pretend to live in builtins
- # so that the Skipped exception prints shorter/nicer.
- __module__ = "builtins"
-
- def __init__(
- self,
- msg: Optional[str] = None,
- pytrace: bool = True,
- allow_module_level: bool = False,
- *,
- _use_item_location: bool = False,
- ) -> None:
- super().__init__(msg=msg, pytrace=pytrace)
- self.allow_module_level = allow_module_level
- # If true, the skip location is reported as the item's location,
- # instead of the place that raises the exception/calls skip().
- self._use_item_location = _use_item_location
-
-
-class Failed(OutcomeException):
- """Raised from an explicit call to pytest.fail()."""
-
- __module__ = "builtins"
-
-
-class Exit(Exception):
- """Raised for immediate program exits (no tracebacks/summaries)."""
-
- def __init__(
- self, msg: str = "unknown reason", returncode: Optional[int] = None
- ) -> None:
- self.msg = msg
- self.returncode = returncode
- super().__init__(msg)
-
-
-# Elaborate hack to work around https://github.com/python/mypy/issues/2087.
-# Ideally would just be `exit.Exception = Exit` etc.
-
-_F = TypeVar("_F", bound=Callable[..., object])
-_ET = TypeVar("_ET", bound=Type[BaseException])
-
-
-class _WithException(Protocol[_F, _ET]):
- Exception: _ET
- __call__: _F
-
-
-def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]:
- def decorate(func: _F) -> _WithException[_F, _ET]:
- func_with_exception = cast(_WithException[_F, _ET], func)
- func_with_exception.Exception = exception_type
- return func_with_exception
-
- return decorate
-
-
-# Exposed helper methods.
-
-
-@_with_exception(Exit)
-def exit(
- reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None
-) -> "NoReturn":
- """Exit testing process.
-
- :param reason:
- The message to show as the reason for exiting pytest. reason has a default value
- only because `msg` is deprecated.
-
- :param returncode:
- Return code to be used when exiting pytest.
-
- :param msg:
- Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead.
- """
- __tracebackhide__ = True
- from _pytest.config import UsageError
-
- if reason and msg:
- raise UsageError(
- "cannot pass reason and msg to exit(), `msg` is deprecated, use `reason`."
- )
- if not reason:
- if msg is None:
- raise UsageError("exit() requires a reason argument")
- warnings.warn(KEYWORD_MSG_ARG.format(func="exit"), stacklevel=2)
- reason = msg
- raise Exit(reason, returncode)
-
-
-@_with_exception(Skipped)
-def skip(
- reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None
-) -> "NoReturn":
- """Skip an executing test with the given message.
-
- This function should be called only during testing (setup, call or teardown) or
- during collection by using the ``allow_module_level`` flag. This function can
- be called in doctests as well.
-
- :param reason:
- The message to show the user as reason for the skip.
-
- :param allow_module_level:
- Allows this function to be called at module level, skipping the rest
- of the module. Defaults to False.
-
- :param msg:
- Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead.
-
- .. note::
- It is better to use the :ref:`pytest.mark.skipif ref` marker when
- possible to declare a test to be skipped under certain conditions
- like mismatching platforms or dependencies.
- Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`)
- to skip a doctest statically.
- """
- __tracebackhide__ = True
- reason = _resolve_msg_to_reason("skip", reason, msg)
- raise Skipped(msg=reason, allow_module_level=allow_module_level)
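-# Illustrative usage (editorial comment, not part of the original file):
-#     pytest.skip("requires a database")                               # inside a test
-#     pytest.skip("platform not supported", allow_module_level=True)   # at module level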
-
-
-@_with_exception(Failed)
-def fail(
- reason: str = "", pytrace: bool = True, msg: Optional[str] = None
-) -> "NoReturn":
- """Explicitly fail an executing test with the given message.
-
- :param reason:
- The message to show the user as reason for the failure.
-
- :param pytrace:
- If False, msg represents the full failure information and no
- python traceback will be reported.
-
- :param msg:
- Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead.
- """
- __tracebackhide__ = True
- reason = _resolve_msg_to_reason("fail", reason, msg)
- raise Failed(msg=reason, pytrace=pytrace)
-
-
-def _resolve_msg_to_reason(
- func_name: str, reason: str, msg: Optional[str] = None
-) -> str:
- """
- Handle converting the deprecated ``msg`` parameter, if provided, into
- ``reason``, raising a deprecation warning. This function will be removed
- when the optional ``msg`` argument is removed from here in the future.
-
- :param str func_name:
- The name of the offending function, this is formatted into the deprecation message.
-
- :param str reason:
- The reason= passed into either pytest.fail() or pytest.skip()
-
- :param str msg:
- The msg= passed into either pytest.fail() or pytest.skip(). This will
- be converted into reason if it is provided to allow pytest.skip(msg=) or
- pytest.fail(msg=) to continue working in the interim period.
-
- :returns:
- The value to use as reason.
-
- """
- __tracebackhide__ = True
- if msg is not None:
-
- if reason:
- from pytest import UsageError
-
- raise UsageError(
- f"Passing both ``reason`` and ``msg`` to pytest.{func_name}(...) is not permitted."
- )
- warnings.warn(KEYWORD_MSG_ARG.format(func=func_name), stacklevel=3)
- reason = msg
- return reason
-
-
-class XFailed(Failed):
- """Raised from an explicit call to pytest.xfail()."""
-
-
-@_with_exception(XFailed)
-def xfail(reason: str = "") -> "NoReturn":
- """Imperatively xfail an executing test or setup function with the given reason.
-
- This function should be called only during testing (setup, call or teardown).
-
- .. note::
- It is better to use the :ref:`pytest.mark.xfail ref` marker when
- possible to declare a test to be xfailed under certain conditions
- like known bugs or missing features.
- """
- __tracebackhide__ = True
- raise XFailed(reason)
-
-
-def importorskip(
- modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
-) -> Any:
- """Import and return the requested module ``modname``, or skip the
- current test if the module cannot be imported.
-
- :param str modname:
- The name of the module to import.
- :param str minversion:
- If given, the imported module's ``__version__`` attribute must be at
- least this minimal version, otherwise the test is still skipped.
- :param str reason:
- If given, this reason is shown as the message when the module cannot
- be imported.
-
- :returns:
- The imported module. This should be assigned to its canonical name.
-
- Example::
-
- docutils = pytest.importorskip("docutils")
- """
- import warnings
-
- __tracebackhide__ = True
- compile(modname, "", "eval") # to catch syntax errors
-
- with warnings.catch_warnings():
- # Make sure to ignore ImportWarnings that might happen because
- # of existing directories with the same name we're trying to
- # import but without a __init__.py file.
- warnings.simplefilter("ignore")
- try:
- __import__(modname)
- except ImportError as exc:
- if reason is None:
- reason = f"could not import {modname!r}: {exc}"
- raise Skipped(reason, allow_module_level=True) from None
- mod = sys.modules[modname]
- if minversion is None:
- return mod
- verattr = getattr(mod, "__version__", None)
- if minversion is not None:
- # Imported lazily to improve start-up time.
- from packaging.version import Version
-
- if verattr is None or Version(verattr) < Version(minversion):
- raise Skipped(
- "module %r has __version__ %r, required is: %r"
- % (modname, verattr, minversion),
- allow_module_level=True,
- )
- return mod
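
A brief, hedged usage sketch of ``importorskip`` as documented above; the module name and minimum version below are illustrative assumptions, not taken from this diff:

    import pytest

    # Skip the calling test module if numpy is missing or older than 1.20
    # ("numpy" and "1.20" are hypothetical examples).
    numpy = pytest.importorskip("numpy", minversion="1.20")

    def test_mean():
        assert numpy.mean([1, 2, 3]) == 2
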
diff --git a/contrib/python/pytest/py3/_pytest/pastebin.py b/contrib/python/pytest/py3/_pytest/pastebin.py
deleted file mode 100644
index 385b3022cc..0000000000
--- a/contrib/python/pytest/py3/_pytest/pastebin.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""Submit failure or test session information to a pastebin service."""
-import tempfile
-from io import StringIO
-from typing import IO
-from typing import Union
-
-import pytest
-from _pytest.config import Config
-from _pytest.config import create_terminal_writer
-from _pytest.config.argparsing import Parser
-from _pytest.stash import StashKey
-from _pytest.terminal import TerminalReporter
-
-
-pastebinfile_key = StashKey[IO[bytes]]()
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("terminal reporting")
- group._addoption(
- "--pastebin",
- metavar="mode",
- action="store",
- dest="pastebin",
- default=None,
- choices=["failed", "all"],
- help="send failed|all info to bpaste.net pastebin service.",
- )
-
-
-@pytest.hookimpl(trylast=True)
-def pytest_configure(config: Config) -> None:
- if config.option.pastebin == "all":
- tr = config.pluginmanager.getplugin("terminalreporter")
- # If no terminal reporter plugin is present, nothing we can do here;
- # this can happen when this function executes in a worker node
- # when using pytest-xdist, for example.
- if tr is not None:
- # pastebin file will be UTF-8 encoded binary file.
- config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b")
- oldwrite = tr._tw.write
-
- def tee_write(s, **kwargs):
- oldwrite(s, **kwargs)
- if isinstance(s, str):
- s = s.encode("utf-8")
- config.stash[pastebinfile_key].write(s)
-
- tr._tw.write = tee_write
-
-
-def pytest_unconfigure(config: Config) -> None:
- if pastebinfile_key in config.stash:
- pastebinfile = config.stash[pastebinfile_key]
- # Get terminal contents and delete file.
- pastebinfile.seek(0)
- sessionlog = pastebinfile.read()
- pastebinfile.close()
- del config.stash[pastebinfile_key]
- # Undo our patching in the terminal reporter.
- tr = config.pluginmanager.getplugin("terminalreporter")
- del tr._tw.__dict__["write"]
- # Write summary.
- tr.write_sep("=", "Sending information to Paste Service")
- pastebinurl = create_new_paste(sessionlog)
- tr.write_line("pastebin session-log: %s\n" % pastebinurl)
-
-
-def create_new_paste(contents: Union[str, bytes]) -> str:
- """Create a new paste using the bpaste.net service.
-
-    :param contents: Paste contents string.
- :returns: URL to the pasted contents, or an error message.
- """
- import re
- from urllib.request import urlopen
- from urllib.parse import urlencode
-
- params = {"code": contents, "lexer": "text", "expiry": "1week"}
- url = "https://bpa.st"
- try:
- response: str = (
- urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8")
- )
- except OSError as exc_info: # urllib errors
- return "bad response: %s" % exc_info
- m = re.search(r'href="/raw/(\w+)"', response)
- if m:
- return f"{url}/show/{m.group(1)}"
- else:
- return "bad response: invalid format ('" + response + "')"
-
-
-def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
- if terminalreporter.config.option.pastebin != "failed":
- return
- if "failed" in terminalreporter.stats:
- terminalreporter.write_sep("=", "Sending information to Paste Service")
- for rep in terminalreporter.stats["failed"]:
- try:
- msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
- except AttributeError:
- msg = terminalreporter._getfailureheadline(rep)
- file = StringIO()
- tw = create_terminal_writer(terminalreporter.config, file)
- rep.toterminal(tw)
- s = file.getvalue()
- assert len(s)
- pastebinurl = create_new_paste(s)
- terminalreporter.write_line(f"{msg} --> {pastebinurl}")
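
The ``pytest_configure`` hook above uses a small "tee" pattern: the terminal writer's ``write`` is wrapped so that everything written to the terminal also accumulates, UTF-8 encoded, in a temporary binary file for later pasting. A minimal standalone sketch of that pattern (the names here are illustrative, not part of pytest's API):

    import sys
    import tempfile

    def make_tee(oldwrite, buffer):
        # Forward to the original writer, then mirror the bytes into `buffer`.
        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            if isinstance(s, str):
                s = s.encode("utf-8")
            buffer.write(s)
        return tee_write

    buf = tempfile.TemporaryFile("w+b")
    write = make_tee(sys.stdout.write, buf)  # stand-in for tr._tw.write
    write("collected 3 items\n")
    buf.seek(0)
    assert buf.read() == b"collected 3 items\n"
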
diff --git a/contrib/python/pytest/py3/_pytest/pathlib.py b/contrib/python/pytest/py3/_pytest/pathlib.py
deleted file mode 100644
index c5a411b596..0000000000
--- a/contrib/python/pytest/py3/_pytest/pathlib.py
+++ /dev/null
@@ -1,735 +0,0 @@
-import atexit
-import contextlib
-import fnmatch
-import importlib.util
-import itertools
-import os
-import shutil
-import sys
-import uuid
-import warnings
-from enum import Enum
-from errno import EBADF
-from errno import ELOOP
-from errno import ENOENT
-from errno import ENOTDIR
-from functools import partial
-from os.path import expanduser
-from os.path import expandvars
-from os.path import isabs
-from os.path import sep
-from pathlib import Path
-from pathlib import PurePath
-from posixpath import sep as posix_sep
-from types import ModuleType
-from typing import Callable
-from typing import Dict
-from typing import Iterable
-from typing import Iterator
-from typing import Optional
-from typing import Set
-from typing import TypeVar
-from typing import Union
-
-from _pytest.compat import assert_never
-from _pytest.outcomes import skip
-from _pytest.warning_types import PytestWarning
-
-LOCK_TIMEOUT = 60 * 60 * 24 * 3
-
-
-_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
-
-# The following function, variables and comments were
-# copied from cpython 3.9 Lib/pathlib.py file.
-
-# EBADF - guard against macOS `stat` throwing EBADF
-_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
-
-_IGNORED_WINERRORS = (
- 21, # ERROR_NOT_READY - drive exists but is not accessible
- 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
-)
-
-
-def _ignore_error(exception):
- return (
- getattr(exception, "errno", None) in _IGNORED_ERRORS
- or getattr(exception, "winerror", None) in _IGNORED_WINERRORS
- )
-
-
-def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
- return path.joinpath(".lock")
-
-
-def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
- """Handle known read-only errors during rmtree.
-
- The returned value is used only by our own tests.
- """
- exctype, excvalue = exc[:2]
-
- # Another process removed the file in the middle of the "rm_rf" (xdist for example).
- # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
- if isinstance(excvalue, FileNotFoundError):
- return False
-
- if not isinstance(excvalue, PermissionError):
- warnings.warn(
- PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}")
- )
- return False
-
- if func not in (os.rmdir, os.remove, os.unlink):
- if func not in (os.open,):
- warnings.warn(
- PytestWarning(
- "(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
- func, path, exctype, excvalue
- )
- )
- )
- return False
-
- # Chmod + retry.
- import stat
-
- def chmod_rw(p: str) -> None:
- mode = os.stat(p).st_mode
- os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)
-
- # For files, we need to recursively go upwards in the directories to
- # ensure they all are also writable.
- p = Path(path)
- if p.is_file():
- for parent in p.parents:
- chmod_rw(str(parent))
- # Stop when we reach the original path passed to rm_rf.
- if parent == start_path:
- break
- chmod_rw(str(path))
-
- func(path)
- return True
-
-
-def ensure_extended_length_path(path: Path) -> Path:
- """Get the extended-length version of a path (Windows).
-
- On Windows, by default, the maximum length of a path (MAX_PATH) is 260
- characters, and operations on paths longer than that fail. But it is possible
- to overcome this by converting the path to "extended-length" form before
- performing the operation:
- https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation
-
- On Windows, this function returns the extended-length absolute version of path.
- On other platforms it returns path unchanged.
- """
- if sys.platform.startswith("win32"):
- path = path.resolve()
- path = Path(get_extended_length_path_str(str(path)))
- return path
-
-
-def get_extended_length_path_str(path: str) -> str:
- """Convert a path to a Windows extended length path."""
- long_path_prefix = "\\\\?\\"
- unc_long_path_prefix = "\\\\?\\UNC\\"
- if path.startswith((long_path_prefix, unc_long_path_prefix)):
- return path
- # UNC
- if path.startswith("\\\\"):
- return unc_long_path_prefix + path[2:]
- return long_path_prefix + path
-
-
-def rm_rf(path: Path) -> None:
- """Remove the path contents recursively, even if some elements
- are read-only."""
- path = ensure_extended_length_path(path)
- onerror = partial(on_rm_rf_error, start_path=path)
- shutil.rmtree(str(path), onerror=onerror)
-
-
-def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
- """Find all elements in root that begin with the prefix, case insensitive."""
- l_prefix = prefix.lower()
- for x in root.iterdir():
- if x.name.lower().startswith(l_prefix):
- yield x
-
-
-def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
- """Return the parts of the paths following the prefix.
-
- :param iter: Iterator over path names.
- :param prefix: Expected prefix of the path names.
- """
- p_len = len(prefix)
- for p in iter:
- yield p.name[p_len:]
-
-
-def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
- """Combine find_prefixes and extract_suffixes."""
- return extract_suffixes(find_prefixed(root, prefix), prefix)
-
-
-def parse_num(maybe_num) -> int:
-    """Parse a numeric path suffix, returning -1 on error."""
- try:
- return int(maybe_num)
- except ValueError:
- return -1
-
-
-def _force_symlink(
- root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
-) -> None:
- """Helper to create the current symlink.
-
- It's full of race conditions that are reasonably OK to ignore
- for the context of best effort linking to the latest test run.
-
-    The presumption is that, under heavy parallelism, this
-    inaccuracy is acceptable.
- """
- current_symlink = root.joinpath(target)
- try:
- current_symlink.unlink()
- except OSError:
- pass
- try:
- current_symlink.symlink_to(link_to)
- except Exception:
- pass
-
-
-def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path:
- """Create a directory with an increased number as suffix for the given prefix."""
- for i in range(10):
- # try up to 10 times to create the folder
- max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
- new_number = max_existing + 1
- new_path = root.joinpath(f"{prefix}{new_number}")
- try:
- new_path.mkdir(mode=mode)
- except Exception:
- pass
- else:
- _force_symlink(root, prefix + "current", new_path)
- return new_path
- else:
- raise OSError(
- "could not create numbered dir with prefix "
- "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
- )
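
A small sketch of the numbered-directory behaviour implemented above: each call creates ``<prefix><N>`` with ``N`` one above the current maximum and, on platforms with symlink support, repoints the ``<prefix>current`` symlink. The root path is an assumed scratch location:

    import tempfile
    from pathlib import Path

    root = Path(tempfile.mkdtemp())
    first = make_numbered_dir(root, "run-")   # creates root/run-0
    second = make_numbered_dir(root, "run-")  # creates root/run-1
    assert second.name == "run-1"
    # Best-effort symlink; it may silently be absent on some platforms.
    assert (root / "run-current").resolve() == second.resolve()
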
-
-
-def create_cleanup_lock(p: Path) -> Path:
- """Create a lock to prevent premature folder cleanup."""
- lock_path = get_lock_path(p)
- try:
- fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
- except FileExistsError as e:
- raise OSError(f"cannot create lockfile in {p}") from e
- else:
- pid = os.getpid()
- spid = str(pid).encode()
- os.write(fd, spid)
- os.close(fd)
- if not lock_path.is_file():
- raise OSError("lock path got renamed after successful creation")
- return lock_path
-
-
-def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
- """Register a cleanup function for removing a lock, by default on atexit."""
- pid = os.getpid()
-
- def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
- current_pid = os.getpid()
- if current_pid != original_pid:
- # fork
- return
- try:
- lock_path.unlink()
- except OSError:
- pass
-
- return register(cleanup_on_exit)
-
-
-def maybe_delete_a_numbered_dir(path: Path) -> None:
- """Remove a numbered directory if its lock can be obtained and it does
- not seem to be in use."""
- path = ensure_extended_length_path(path)
- lock_path = None
- try:
- lock_path = create_cleanup_lock(path)
- parent = path.parent
-
- garbage = parent.joinpath(f"garbage-{uuid.uuid4()}")
- path.rename(garbage)
- rm_rf(garbage)
- except OSError:
- # known races:
- # * other process did a cleanup at the same time
- # * deletable folder was found
- # * process cwd (Windows)
- return
- finally:
- # If we created the lock, ensure we remove it even if we failed
- # to properly remove the numbered dir.
- if lock_path is not None:
- try:
- lock_path.unlink()
- except OSError:
- pass
-
-
-def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
- """Check if `path` is deletable based on whether the lock file is expired."""
- if path.is_symlink():
- return False
- lock = get_lock_path(path)
- try:
- if not lock.is_file():
- return True
- except OSError:
- # we might not have access to the lock file at all, in this case assume
- # we don't have access to the entire directory (#7491).
- return False
- try:
- lock_time = lock.stat().st_mtime
- except Exception:
- return False
- else:
- if lock_time < consider_lock_dead_if_created_before:
- # We want to ignore any errors while trying to remove the lock such as:
- # - PermissionDenied, like the file permissions have changed since the lock creation;
- # - FileNotFoundError, in case another pytest process got here first;
- # and any other cause of failure.
- with contextlib.suppress(OSError):
- lock.unlink()
- return True
- return False
-
-
-def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
- """Try to cleanup a folder if we can ensure it's deletable."""
- if ensure_deletable(path, consider_lock_dead_if_created_before):
- maybe_delete_a_numbered_dir(path)
-
-
-def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
- """List candidates for numbered directories to be removed - follows py.path."""
- max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
- max_delete = max_existing - keep
- paths = find_prefixed(root, prefix)
- paths, paths2 = itertools.tee(paths)
- numbers = map(parse_num, extract_suffixes(paths2, prefix))
- for path, number in zip(paths, numbers):
- if number <= max_delete:
- yield path
-
-
-def cleanup_numbered_dir(
- root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
-) -> None:
- """Cleanup for lock driven numbered directories."""
- for path in cleanup_candidates(root, prefix, keep):
- try_cleanup(path, consider_lock_dead_if_created_before)
- for path in root.glob("garbage-*"):
- try_cleanup(path, consider_lock_dead_if_created_before)
-
-
-def make_numbered_dir_with_cleanup(
- root: Path,
- prefix: str,
- keep: int,
- lock_timeout: float,
- mode: int,
-) -> Path:
- """Create a numbered dir with a cleanup lock and remove old ones."""
- e = None
- for i in range(10):
- try:
- p = make_numbered_dir(root, prefix, mode)
- lock_path = create_cleanup_lock(p)
- register_cleanup_lock_removal(lock_path)
- except Exception as exc:
- e = exc
- else:
- consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
- # Register a cleanup for program exit
- atexit.register(
- cleanup_numbered_dir,
- root,
- prefix,
- keep,
- consider_lock_dead_if_created_before,
- )
- return p
- assert e is not None
- raise e
-
-
-def resolve_from_str(input: str, rootpath: Path) -> Path:
- input = expanduser(input)
- input = expandvars(input)
- if isabs(input):
- return Path(input)
- else:
- return rootpath.joinpath(input)
-
-
-def fnmatch_ex(pattern: str, path: Union[str, "os.PathLike[str]"]) -> bool:
- """A port of FNMatcher from py.path.common which works with PurePath() instances.
-
- The difference between this algorithm and PurePath.match() is that the
- latter matches "**" glob expressions for each part of the path, while
- this algorithm uses the whole path instead.
-
- For example:
- "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py"
- with this algorithm, but not with PurePath.match().
-
-    This algorithm was ported to keep backward compatibility with existing
-    settings which assume paths match according to this logic.
-
- References:
- * https://bugs.python.org/issue29249
- * https://bugs.python.org/issue34731
- """
- path = PurePath(path)
- iswin32 = sys.platform.startswith("win")
-
- if iswin32 and sep not in pattern and posix_sep in pattern:
- # Running on Windows, the pattern has no Windows path separators,
- # and the pattern has one or more Posix path separators. Replace
- # the Posix path separators with the Windows path separator.
- pattern = pattern.replace(posix_sep, sep)
-
- if sep not in pattern:
- name = path.name
- else:
- name = str(path)
- if path.is_absolute() and not os.path.isabs(pattern):
- pattern = f"*{os.sep}{pattern}"
- return fnmatch.fnmatch(name, pattern)
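
An illustrative check (POSIX paths assumed) of the difference described in the docstring: ``fnmatch_ex`` matches ``**`` against the whole path, while ``PurePath.match`` applies the pattern per path component:

    from pathlib import PurePath

    assert fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")
    assert not PurePath("tests/foo/bar/doc/test_foo.py").match("tests/**/doc/test*.py")
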
-
-
-def parts(s: str) -> Set[str]:
- parts = s.split(sep)
- return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
-
-
-def symlink_or_skip(src, dst, **kwargs):
- """Make a symlink, or skip the test in case symlinks are not supported."""
- try:
- os.symlink(str(src), str(dst), **kwargs)
- except OSError as e:
- skip(f"symlinks not supported: {e}")
-
-
-class ImportMode(Enum):
- """Possible values for `mode` parameter of `import_path`."""
-
- prepend = "prepend"
- append = "append"
- importlib = "importlib"
-
-
-class ImportPathMismatchError(ImportError):
- """Raised on import_path() if there is a mismatch of __file__'s.
-
-    This can happen when `import_path` is called multiple times with different filenames that have
-    the same basename but reside in different packages
- (for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
- """
-
-
-def import_path(
- p: Union[str, "os.PathLike[str]"],
- *,
- mode: Union[str, ImportMode] = ImportMode.prepend,
- root: Path,
-) -> ModuleType:
- """Import and return a module from the given path, which can be a file (a module) or
- a directory (a package).
-
- The import mechanism used is controlled by the `mode` parameter:
-
- * `mode == ImportMode.prepend`: the directory containing the module (or package, taking
- `__init__.py` files into account) will be put at the *start* of `sys.path` before
-      being imported with `__import__`.
-
- * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
- to the end of `sys.path`, if not already in `sys.path`.
-
- * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
- to import the module, which avoids having to use `__import__` and muck with `sys.path`
- at all. It effectively allows having same-named test modules in different places.
-
- :param root:
- Used as an anchor when mode == ImportMode.importlib to obtain
- a unique name for the module being imported so it can safely be stored
- into ``sys.modules``.
-
- :raises ImportPathMismatchError:
- If after importing the given `path` and the module `__file__`
- are different. Only raised in `prepend` and `append` modes.
- """
- mode = ImportMode(mode)
-
- path = Path(p)
-
- if not path.exists():
- raise ImportError(path)
-
- if mode is ImportMode.importlib:
- module_name = module_name_from_path(path, root)
-
- for meta_importer in sys.meta_path:
- spec = meta_importer.find_spec(module_name, [str(path.parent)])
- if spec is not None:
- break
- else:
- spec = importlib.util.spec_from_file_location(module_name, str(path))
-
- if spec is None:
- raise ImportError(f"Can't find module {module_name} at location {path}")
- mod = importlib.util.module_from_spec(spec)
- sys.modules[module_name] = mod
- spec.loader.exec_module(mod) # type: ignore[union-attr]
- insert_missing_modules(sys.modules, module_name)
- return mod
-
- pkg_path = resolve_package_path(path)
- if pkg_path is not None:
- pkg_root = pkg_path.parent
- names = list(path.with_suffix("").relative_to(pkg_root).parts)
- if names[-1] == "__init__":
- names.pop()
- module_name = ".".join(names)
- else:
- pkg_root = path.parent
- module_name = path.stem
-
- # Change sys.path permanently: restoring it at the end of this function would cause surprising
- # problems because of delayed imports: for example, a conftest.py file imported by this function
- # might have local imports, which would fail at runtime if we restored sys.path.
- if mode is ImportMode.append:
- if str(pkg_root) not in sys.path:
- sys.path.append(str(pkg_root))
- elif mode is ImportMode.prepend:
- if str(pkg_root) != sys.path[0]:
- sys.path.insert(0, str(pkg_root))
- else:
- assert_never(mode)
-
- importlib.import_module(module_name)
-
- mod = sys.modules[module_name]
- if path.name == "__init__.py":
- return mod
-
- ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
- if ignore != "1":
- module_file = mod.__file__
- if module_file is None:
- raise ImportPathMismatchError(module_name, module_file, path)
-
- if module_file.endswith((".pyc", ".pyo")):
- module_file = module_file[:-1]
- if module_file.endswith(os.path.sep + "__init__.py"):
- module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
-
- try:
- is_same = _is_same(str(path), module_file)
- except FileNotFoundError:
- is_same = False
-
- if not is_same:
- raise ImportPathMismatchError(module_name, module_file, path)
-
- return mod
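
A hedged usage sketch of ``import_path`` in ``importlib`` mode, which allows same-named test modules in different directories; the paths below are hypothetical and the files must exist for this to run:

    from pathlib import Path

    root = Path("/projects")
    mod_a = import_path(root / "pkg_a" / "test_foo.py",
                        mode=ImportMode.importlib, root=root)
    mod_b = import_path(root / "pkg_b" / "test_foo.py",
                        mode=ImportMode.importlib, root=root)
    # Module names are derived from the path relative to `root`.
    assert mod_a.__name__ == "pkg_a.test_foo"
    assert mod_b.__name__ == "pkg_b.test_foo"
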
-
-
-# Implement a special _is_same function on Windows which returns True if the two filenames
-# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
-if sys.platform.startswith("win"):
-
- def _is_same(f1: str, f2: str) -> bool:
- return Path(f1) == Path(f2) or os.path.samefile(f1, f2)
-
-else:
-
- def _is_same(f1: str, f2: str) -> bool:
- return os.path.samefile(f1, f2)
-
-
-def module_name_from_path(path: Path, root: Path) -> str:
- """
- Return a dotted module name based on the given path, anchored on root.
-
- For example: path="projects/src/tests/test_foo.py" and root="/projects", the
- resulting module name will be "src.tests.test_foo".
- """
- path = path.with_suffix("")
- try:
- relative_path = path.relative_to(root)
- except ValueError:
- # If we can't get a relative path to root, use the full path, except
- # for the first part ("d:\\" or "/" depending on the platform, for example).
- path_parts = path.parts[1:]
- else:
- # Use the parts for the relative path to the root path.
- path_parts = relative_path.parts
-
- return ".".join(path_parts)
-
-
-def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None:
- """
- Used by ``import_path`` to create intermediate modules when using mode=importlib.
-
- When we want to import a module as "src.tests.test_foo" for example, we need
- to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
- otherwise "src.tests.test_foo" is not importable by ``__import__``.
- """
- module_parts = module_name.split(".")
- while module_name:
- if module_name not in modules:
- try:
- # If sys.meta_path is empty, calling import_module will issue
- # a warning and raise ModuleNotFoundError. To avoid the
- # warning, we check sys.meta_path explicitly and raise the error
- # ourselves to fall back to creating a dummy module.
- if not sys.meta_path:
- raise ModuleNotFoundError
- importlib.import_module(module_name)
- except ModuleNotFoundError:
- module = ModuleType(
- module_name,
- doc="Empty module created by pytest's importmode=importlib.",
- )
- modules[module_name] = module
- module_parts.pop(-1)
- module_name = ".".join(module_parts)
-
-
-def resolve_package_path(path: Path) -> Optional[Path]:
- """Return the Python package path by looking for the last
- directory upwards which still contains an __init__.py.
-
-    Returns None if it cannot be determined.
- """
- result = None
- for parent in itertools.chain((path,), path.parents):
- if parent.is_dir():
- if not parent.joinpath("__init__.py").is_file():
- break
- if not parent.name.isidentifier():
- break
- result = parent
- return result
-
-
-def visit(
- path: Union[str, "os.PathLike[str]"], recurse: Callable[["os.DirEntry[str]"], bool]
-) -> Iterator["os.DirEntry[str]"]:
- """Walk a directory recursively, in breadth-first order.
-
- Entries at each directory level are sorted.
- """
-
- # Skip entries with symlink loops and other brokenness, so the caller doesn't
- # have to deal with it.
- entries = []
- for entry in os.scandir(path):
- try:
- entry.is_file()
- except OSError as err:
- if _ignore_error(err):
- continue
- raise
- entries.append(entry)
-
- entries.sort(key=lambda entry: entry.name)
-
- yield from entries
-
- for entry in entries:
- if entry.is_dir() and recurse(entry):
- yield from visit(entry.path, recurse)
-
-
-def absolutepath(path: Union[Path, str]) -> Path:
- """Convert a path to an absolute path using os.path.abspath.
-
- Prefer this over Path.resolve() (see #6523).
- Prefer this over Path.absolute() (not public, doesn't normalize).
- """
- return Path(os.path.abspath(str(path)))
-
-
-def commonpath(path1: Path, path2: Path) -> Optional[Path]:
- """Return the common part shared with the other path, or None if there is
- no common part.
-
- If one path is relative and one is absolute, returns None.
- """
- try:
- return Path(os.path.commonpath((str(path1), str(path2))))
- except ValueError:
- return None
-
-
-def bestrelpath(directory: Path, dest: Path) -> str:
- """Return a string which is a relative path from directory to dest such
- that directory/bestrelpath == dest.
-
- The paths must be either both absolute or both relative.
-
- If no such path can be determined, returns dest.
- """
- assert isinstance(directory, Path)
- assert isinstance(dest, Path)
- if dest == directory:
- return os.curdir
- # Find the longest common directory.
- base = commonpath(directory, dest)
- # Can be the case on Windows for two absolute paths on different drives.
- # Can be the case for two relative paths without common prefix.
- # Can be the case for a relative path and an absolute path.
- if not base:
- return str(dest)
- reldirectory = directory.relative_to(base)
- reldest = dest.relative_to(base)
- return os.path.join(
- # Back from directory to base.
- *([os.pardir] * len(reldirectory.parts)),
- # Forward from base to dest.
- *reldest.parts,
- )
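
Illustrative behaviour of ``commonpath`` and ``bestrelpath`` above, assuming POSIX-style absolute paths:

    import os
    from pathlib import Path

    assert commonpath(Path("/a/b/c"), Path("/a/b/d")) == Path("/a/b")
    assert bestrelpath(Path("/a/b/c"), Path("/a/b/d")) == os.path.join("..", "d")
    assert bestrelpath(Path("/a"), Path("/a")) == os.curdir
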
-
-
-# Originates from py.path.local.copy(), with significant trims and adjustments.
-# TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True)
-def copytree(source: Path, target: Path) -> None:
- """Recursively copy a source directory to target."""
- assert source.is_dir()
- for entry in visit(source, recurse=lambda entry: not entry.is_symlink()):
- x = Path(entry)
- relpath = x.relative_to(source)
- newx = target / relpath
- newx.parent.mkdir(exist_ok=True)
- if x.is_symlink():
- newx.symlink_to(os.readlink(x))
- elif x.is_file():
- shutil.copyfile(x, newx)
- elif x.is_dir():
- newx.mkdir(exist_ok=True)
diff --git a/contrib/python/pytest/py3/_pytest/py.typed b/contrib/python/pytest/py3/_pytest/py.typed
deleted file mode 100644
index e69de29bb2..0000000000
--- a/contrib/python/pytest/py3/_pytest/py.typed
+++ /dev/null
diff --git a/contrib/python/pytest/py3/_pytest/pytester.py b/contrib/python/pytest/py3/_pytest/pytester.py
deleted file mode 100644
index 8368f94412..0000000000
--- a/contrib/python/pytest/py3/_pytest/pytester.py
+++ /dev/null
@@ -1,1750 +0,0 @@
-"""(Disabled by default) support for testing pytest and pytest plugins.
-
-PYTEST_DONT_REWRITE
-"""
-import collections.abc
-import contextlib
-import gc
-import importlib
-import os
-import platform
-import re
-import shutil
-import subprocess
-import sys
-import traceback
-from fnmatch import fnmatch
-from io import StringIO
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import Dict
-from typing import Generator
-from typing import IO
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import overload
-from typing import Sequence
-from typing import TextIO
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-from weakref import WeakKeyDictionary
-
-from iniconfig import IniConfig
-from iniconfig import SectionWrapper
-
-from _pytest import timing
-from _pytest._code import Source
-from _pytest.capture import _get_multicapture
-from _pytest.compat import final
-from _pytest.compat import NOTSET
-from _pytest.compat import NotSetType
-from _pytest.config import _PluggyPlugin
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config import main
-from _pytest.config import PytestPluginManager
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import FixtureRequest
-from _pytest.main import Session
-from _pytest.monkeypatch import MonkeyPatch
-from _pytest.nodes import Collector
-from _pytest.nodes import Item
-from _pytest.outcomes import fail
-from _pytest.outcomes import importorskip
-from _pytest.outcomes import skip
-from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import copytree
-from _pytest.pathlib import make_numbered_dir
-from _pytest.reports import CollectReport
-from _pytest.reports import TestReport
-from _pytest.tmpdir import TempPathFactory
-from _pytest.warning_types import PytestWarning
-
-
-if TYPE_CHECKING:
- from typing_extensions import Final
- from typing_extensions import Literal
-
- import pexpect
-
-
-pytest_plugins = ["pytester_assertions"]
-
-
-IGNORE_PAM = [ # filenames added when obtaining details about the current user
- "/var/lib/sss/mc/passwd"
-]
-
-
-def pytest_addoption(parser: Parser) -> None:
- parser.addoption(
- "--lsof",
- action="store_true",
- dest="lsof",
- default=False,
- help="run FD checks if lsof is available",
- )
-
- parser.addoption(
- "--runpytest",
- default="inprocess",
- dest="runpytest",
- choices=("inprocess", "subprocess"),
- help=(
- "run pytest sub runs in tests using an 'inprocess' "
- "or 'subprocess' (python -m main) method"
- ),
- )
-
- parser.addini(
- "pytester_example_dir", help="directory to take the pytester example files from"
- )
-
-
-def pytest_configure(config: Config) -> None:
- if config.getvalue("lsof"):
- checker = LsofFdLeakChecker()
- if checker.matching_platform():
- config.pluginmanager.register(checker)
-
- config.addinivalue_line(
- "markers",
- "pytester_example_path(*path_segments): join the given path "
- "segments to `pytester_example_dir` for this test.",
- )
-
-
-class LsofFdLeakChecker:
- def get_open_files(self) -> List[Tuple[str, str]]:
- out = subprocess.run(
- ("lsof", "-Ffn0", "-p", str(os.getpid())),
- stdout=subprocess.PIPE,
- stderr=subprocess.DEVNULL,
- check=True,
- text=True,
- ).stdout
-
- def isopen(line: str) -> bool:
- return line.startswith("f") and (
- "deleted" not in line
- and "mem" not in line
- and "txt" not in line
- and "cwd" not in line
- )
-
- open_files = []
-
- for line in out.split("\n"):
- if isopen(line):
- fields = line.split("\0")
- fd = fields[0][1:]
- filename = fields[1][1:]
- if filename in IGNORE_PAM:
- continue
- if filename.startswith("/"):
- open_files.append((fd, filename))
-
- return open_files
-
- def matching_platform(self) -> bool:
- try:
- subprocess.run(("lsof", "-v"), check=True)
- except (OSError, subprocess.CalledProcessError):
- return False
- else:
- return True
-
- @hookimpl(hookwrapper=True, tryfirst=True)
- def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]:
- lines1 = self.get_open_files()
- yield
- if hasattr(sys, "pypy_version_info"):
- gc.collect()
- lines2 = self.get_open_files()
-
- new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
- leaked_files = [t for t in lines2 if t[0] in new_fds]
- if leaked_files:
- error = [
- "***** %s FD leakage detected" % len(leaked_files),
- *(str(f) for f in leaked_files),
- "*** Before:",
- *(str(f) for f in lines1),
- "*** After:",
- *(str(f) for f in lines2),
- "***** %s FD leakage detected" % len(leaked_files),
- "*** function %s:%s: %s " % item.location,
- "See issue #2366",
- ]
- item.warn(PytestWarning("\n".join(error)))
-
-
-# used at least by pytest-xdist plugin
-
-
-@fixture
-def _pytest(request: FixtureRequest) -> "PytestArg":
-    """Return a helper which offers a gethookrecorder(hook) method that
-    returns a HookRecorder instance for making assertions about called
-    hooks."""
- return PytestArg(request)
-
-
-class PytestArg:
- def __init__(self, request: FixtureRequest) -> None:
- self._request = request
-
- def gethookrecorder(self, hook) -> "HookRecorder":
- hookrecorder = HookRecorder(hook._pm)
- self._request.addfinalizer(hookrecorder.finish_recording)
- return hookrecorder
-
-
-def get_public_names(values: Iterable[str]) -> List[str]:
- """Only return names from iterator values without a leading underscore."""
- return [x for x in values if x[0] != "_"]
-
-
-@final
-class RecordedHookCall:
- """A recorded call to a hook.
-
- The arguments to the hook call are set as attributes.
- For example:
-
- .. code-block:: python
-
- calls = hook_recorder.getcalls("pytest_runtest_setup")
- # Suppose pytest_runtest_setup was called once with `item=an_item`.
- assert calls[0].item is an_item
- """
-
- def __init__(self, name: str, kwargs) -> None:
- self.__dict__.update(kwargs)
- self._name = name
-
- def __repr__(self) -> str:
- d = self.__dict__.copy()
- del d["_name"]
- return f"<RecordedHookCall {self._name!r}(**{d!r})>"
-
- if TYPE_CHECKING:
- # The class has undetermined attributes, this tells mypy about it.
- def __getattr__(self, key: str):
- ...
-
-
-@final
-class HookRecorder:
- """Record all hooks called in a plugin manager.
-
- Hook recorders are created by :class:`Pytester`.
-
- This wraps all the hook calls in the plugin manager, recording each call
- before propagating the normal calls.
- """
-
- def __init__(
- self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False
- ) -> None:
- check_ispytest(_ispytest)
-
- self._pluginmanager = pluginmanager
- self.calls: List[RecordedHookCall] = []
- self.ret: Optional[Union[int, ExitCode]] = None
-
- def before(hook_name: str, hook_impls, kwargs) -> None:
- self.calls.append(RecordedHookCall(hook_name, kwargs))
-
- def after(outcome, hook_name: str, hook_impls, kwargs) -> None:
- pass
-
- self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
-
- def finish_recording(self) -> None:
- self._undo_wrapping()
-
- def getcalls(self, names: Union[str, Iterable[str]]) -> List[RecordedHookCall]:
- """Get all recorded calls to hooks with the given names (or name)."""
- if isinstance(names, str):
- names = names.split()
- return [call for call in self.calls if call._name in names]
-
- def assert_contains(self, entries: Sequence[Tuple[str, str]]) -> None:
- __tracebackhide__ = True
- i = 0
- entries = list(entries)
- backlocals = sys._getframe(1).f_locals
- while entries:
- name, check = entries.pop(0)
- for ind, call in enumerate(self.calls[i:]):
- if call._name == name:
- print("NAMEMATCH", name, call)
- if eval(check, backlocals, call.__dict__):
- print("CHECKERMATCH", repr(check), "->", call)
- else:
- print("NOCHECKERMATCH", repr(check), "-", call)
- continue
- i += ind + 1
- break
- print("NONAMEMATCH", name, "with", call)
- else:
- fail(f"could not find {name!r} check {check!r}")
-
- def popcall(self, name: str) -> RecordedHookCall:
- __tracebackhide__ = True
- for i, call in enumerate(self.calls):
- if call._name == name:
- del self.calls[i]
- return call
- lines = [f"could not find call {name!r}, in:"]
- lines.extend([" %s" % x for x in self.calls])
- fail("\n".join(lines))
-
- def getcall(self, name: str) -> RecordedHookCall:
- values = self.getcalls(name)
- assert len(values) == 1, (name, values)
- return values[0]
-
- # functionality for test reports
-
- @overload
- def getreports(
- self,
- names: "Literal['pytest_collectreport']",
- ) -> Sequence[CollectReport]:
- ...
-
- @overload
- def getreports(
- self,
- names: "Literal['pytest_runtest_logreport']",
- ) -> Sequence[TestReport]:
- ...
-
- @overload
- def getreports(
- self,
- names: Union[str, Iterable[str]] = (
- "pytest_collectreport",
- "pytest_runtest_logreport",
- ),
- ) -> Sequence[Union[CollectReport, TestReport]]:
- ...
-
- def getreports(
- self,
- names: Union[str, Iterable[str]] = (
- "pytest_collectreport",
- "pytest_runtest_logreport",
- ),
- ) -> Sequence[Union[CollectReport, TestReport]]:
- return [x.report for x in self.getcalls(names)]
-
- def matchreport(
- self,
- inamepart: str = "",
- names: Union[str, Iterable[str]] = (
- "pytest_runtest_logreport",
- "pytest_collectreport",
- ),
- when: Optional[str] = None,
- ) -> Union[CollectReport, TestReport]:
- """Return a testreport whose dotted import path matches."""
- values = []
- for rep in self.getreports(names=names):
- if not when and rep.when != "call" and rep.passed:
- # setup/teardown passing reports - let's ignore those
- continue
- if when and rep.when != when:
- continue
- if not inamepart or inamepart in rep.nodeid.split("::"):
- values.append(rep)
- if not values:
- raise ValueError(
- "could not find test report matching %r: "
- "no test reports at all!" % (inamepart,)
- )
- if len(values) > 1:
- raise ValueError(
- "found 2 or more testreports matching {!r}: {}".format(
- inamepart, values
- )
- )
- return values[0]
-
- @overload
- def getfailures(
- self,
- names: "Literal['pytest_collectreport']",
- ) -> Sequence[CollectReport]:
- ...
-
- @overload
- def getfailures(
- self,
- names: "Literal['pytest_runtest_logreport']",
- ) -> Sequence[TestReport]:
- ...
-
- @overload
- def getfailures(
- self,
- names: Union[str, Iterable[str]] = (
- "pytest_collectreport",
- "pytest_runtest_logreport",
- ),
- ) -> Sequence[Union[CollectReport, TestReport]]:
- ...
-
- def getfailures(
- self,
- names: Union[str, Iterable[str]] = (
- "pytest_collectreport",
- "pytest_runtest_logreport",
- ),
- ) -> Sequence[Union[CollectReport, TestReport]]:
- return [rep for rep in self.getreports(names) if rep.failed]
-
- def getfailedcollections(self) -> Sequence[CollectReport]:
- return self.getfailures("pytest_collectreport")
-
- def listoutcomes(
- self,
- ) -> Tuple[
- Sequence[TestReport],
- Sequence[Union[CollectReport, TestReport]],
- Sequence[Union[CollectReport, TestReport]],
- ]:
- passed = []
- skipped = []
- failed = []
- for rep in self.getreports(
- ("pytest_collectreport", "pytest_runtest_logreport")
- ):
- if rep.passed:
- if rep.when == "call":
- assert isinstance(rep, TestReport)
- passed.append(rep)
- elif rep.skipped:
- skipped.append(rep)
- else:
- assert rep.failed, f"Unexpected outcome: {rep!r}"
- failed.append(rep)
- return passed, skipped, failed
-
- def countoutcomes(self) -> List[int]:
- return [len(x) for x in self.listoutcomes()]
-
- def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None:
- __tracebackhide__ = True
- from _pytest.pytester_assertions import assertoutcome
-
- outcomes = self.listoutcomes()
- assertoutcome(
- outcomes,
- passed=passed,
- skipped=skipped,
- failed=failed,
- )
-
- def clear(self) -> None:
- self.calls[:] = []
-
-
-@fixture
-def linecomp() -> "LineComp":
-    """A :class:`LineComp` instance for checking that an input linearly
- contains a sequence of strings."""
- return LineComp()
-
-
-@fixture(name="LineMatcher")
-def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:
-    """A reference to the :class:`LineMatcher`.
-
- This is instantiable with a list of lines (without their trailing newlines).
- This is useful for testing large texts, such as the output of commands.
- """
- return LineMatcher
-
-
-@fixture
-def pytester(
- request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch
-) -> "Pytester":
- """
- Facilities to write tests/configuration files, execute pytest in isolation, and match
- against expected output, perfect for black-box testing of pytest plugins.
-
- It attempts to isolate the test run from external factors as much as possible, modifying
- the current working directory to ``path`` and environment variables during initialization.
-
- It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path`
- fixture but provides methods which aid in testing pytest itself.
- """
- return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True)
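
A hedged sketch of a typical pytester-based test: write a test file with ``makepyfile``, run it (``runpytest`` is referenced in the ``Pytester`` docstring further below), and assert on the parsed outcomes. The test body is an illustrative assumption:

    def test_my_plugin(pytester):
        pytester.makepyfile(
            """
            def test_ok():
                assert 1 + 1 == 2
            """
        )
        result = pytester.runpytest()
        result.assert_outcomes(passed=1)
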
-
-
-@fixture
-def _sys_snapshot() -> Generator[None, None, None]:
- snappaths = SysPathsSnapshot()
- snapmods = SysModulesSnapshot()
- yield
- snapmods.restore()
- snappaths.restore()
-
-
-@fixture
-def _config_for_test() -> Generator[Config, None, None]:
- from _pytest.config import get_config
-
- config = get_config()
- yield config
- config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles.
-
-
-# Regex to match the session duration string in the summary: "74.34s".
-rex_session_duration = re.compile(r"\d+\.\d\ds")
-# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
-rex_outcome = re.compile(r"(\d+) (\w+)")
-
-
-@final
-class RunResult:
- """The result of running a command from :class:`~pytest.Pytester`."""
-
- def __init__(
- self,
- ret: Union[int, ExitCode],
- outlines: List[str],
- errlines: List[str],
- duration: float,
- ) -> None:
- try:
- self.ret: Union[int, ExitCode] = ExitCode(ret)
- """The return value."""
- except ValueError:
- self.ret = ret
- self.outlines = outlines
- """List of lines captured from stdout."""
- self.errlines = errlines
- """List of lines captured from stderr."""
- self.stdout = LineMatcher(outlines)
- """:class:`~pytest.LineMatcher` of stdout.
-
- Use e.g. :func:`str(stdout) <pytest.LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
- :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines()>` method.
- """
- self.stderr = LineMatcher(errlines)
- """:class:`~pytest.LineMatcher` of stderr."""
- self.duration = duration
- """Duration in seconds."""
-
- def __repr__(self) -> str:
- return (
- "<RunResult ret=%s len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
- % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
- )
-
- def parseoutcomes(self) -> Dict[str, int]:
- """Return a dictionary of outcome noun -> count from parsing the terminal
- output that the test process produced.
-
- The returned nouns will always be in plural form::
-
- ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
-
- Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
- """
- return self.parse_summary_nouns(self.outlines)
-
- @classmethod
- def parse_summary_nouns(cls, lines) -> Dict[str, int]:
- """Extract the nouns from a pytest terminal summary line.
-
- It always returns the plural noun for consistency::
-
- ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
-
- Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
- """
- for line in reversed(lines):
- if rex_session_duration.search(line):
- outcomes = rex_outcome.findall(line)
- ret = {noun: int(count) for (count, noun) in outcomes}
- break
- else:
- raise ValueError("Pytest terminal summary report not found")
-
- to_plural = {
- "warning": "warnings",
- "error": "errors",
- }
- return {to_plural.get(k, k): v for k, v in ret.items()}
-
- def assert_outcomes(
- self,
- passed: int = 0,
- skipped: int = 0,
- failed: int = 0,
- errors: int = 0,
- xpassed: int = 0,
- xfailed: int = 0,
- warnings: Optional[int] = None,
- deselected: Optional[int] = None,
- ) -> None:
- """
- Assert that the specified outcomes appear with the respective
- numbers (0 means it didn't occur) in the text output from a test run.
-
- ``warnings`` and ``deselected`` are only checked if not None.
- """
- __tracebackhide__ = True
- from _pytest.pytester_assertions import assert_outcomes
-
- outcomes = self.parseoutcomes()
- assert_outcomes(
- outcomes,
- passed=passed,
- skipped=skipped,
- failed=failed,
- errors=errors,
- xpassed=xpassed,
- xfailed=xfailed,
- warnings=warnings,
- deselected=deselected,
- )
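
An illustrative direct use of ``parse_summary_nouns`` and the regexes defined above; the summary line is a made-up example in the documented format:

    line = "======= 1 failed, 2 passed, 1 warning in 0.13s ======="
    assert rex_session_duration.search(line) is not None
    assert RunResult.parse_summary_nouns([line]) == {
        "failed": 1,
        "passed": 2,
        "warnings": 1,
    }
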
-
-
-class CwdSnapshot:
- def __init__(self) -> None:
- self.__saved = os.getcwd()
-
- def restore(self) -> None:
- os.chdir(self.__saved)
-
-
-class SysModulesSnapshot:
- def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None:
- self.__preserve = preserve
- self.__saved = dict(sys.modules)
-
- def restore(self) -> None:
- if self.__preserve:
- self.__saved.update(
- (k, m) for k, m in sys.modules.items() if self.__preserve(k)
- )
- sys.modules.clear()
- sys.modules.update(self.__saved)
-
-
-class SysPathsSnapshot:
- def __init__(self) -> None:
- self.__saved = list(sys.path), list(sys.meta_path)
-
- def restore(self) -> None:
- sys.path[:], sys.meta_path[:] = self.__saved
-
-
-@final
-class Pytester:
- """
- Facilities to write tests/configuration files, execute pytest in isolation, and match
- against expected output, perfect for black-box testing of pytest plugins.
-
- It attempts to isolate the test run from external factors as much as possible, modifying
- the current working directory to ``path`` and environment variables during initialization.
-
- Attributes:
-
- :ivar Path path: temporary directory path used to create files/run tests from, etc.
-
- :ivar plugins:
- A list of plugins to use with :py:meth:`parseconfig` and
- :py:meth:`runpytest`. Initially this is an empty list but plugins can
- be added to the list. The type of items to add to the list depends on
- the method using them so refer to them for details.
- """
-
- __test__ = False
-
- CLOSE_STDIN: "Final" = NOTSET
-
- class TimeoutExpired(Exception):
- pass
-
- def __init__(
- self,
- request: FixtureRequest,
- tmp_path_factory: TempPathFactory,
- monkeypatch: MonkeyPatch,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self._request = request
- self._mod_collections: WeakKeyDictionary[
- Collector, List[Union[Item, Collector]]
- ] = WeakKeyDictionary()
- if request.function:
- name: str = request.function.__name__
- else:
- name = request.node.name
- self._name = name
- self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
- self.plugins: List[Union[str, _PluggyPlugin]] = []
- self._cwd_snapshot = CwdSnapshot()
- self._sys_path_snapshot = SysPathsSnapshot()
- self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
- self.chdir()
- self._request.addfinalizer(self._finalize)
- self._method = self._request.config.getoption("--runpytest")
- self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)
-
- self._monkeypatch = mp = monkeypatch
- mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
- # Ensure no unexpected caching via tox.
- mp.delenv("TOX_ENV_DIR", raising=False)
- # Discard outer pytest options.
- mp.delenv("PYTEST_ADDOPTS", raising=False)
- # Ensure no user config is used.
- tmphome = str(self.path)
- mp.setenv("HOME", tmphome)
- mp.setenv("USERPROFILE", tmphome)
- # Do not use colors for inner runs by default.
- mp.setenv("PY_COLORS", "0")
-
- @property
- def path(self) -> Path:
- """Temporary directory where files are created and pytest is executed."""
- return self._path
-
- def __repr__(self) -> str:
- return f"<Pytester {self.path!r}>"
-
- def _finalize(self) -> None:
- """
- Clean up global state artifacts.
-
- Some methods modify the global interpreter state and this tries to
-        clean this up. It does not, however, remove the temporary directory,
-        so it can be inspected after the test run has finished.
- """
- self._sys_modules_snapshot.restore()
- self._sys_path_snapshot.restore()
- self._cwd_snapshot.restore()
-
- def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
- # Some zope modules used by twisted-related tests keep internal state
- # and can't be deleted; we had some trouble in the past with
- # `zope.interface` for example.
- #
- # Preserve readline due to https://bugs.python.org/issue41033.
- # pexpect issues a SIGWINCH.
- def preserve_module(name):
- return name.startswith(("zope", "readline"))
-
- return SysModulesSnapshot(preserve=preserve_module)
-
- def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder:
- """Create a new :py:class:`HookRecorder` for a PluginManager."""
- pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True)
- self._request.addfinalizer(reprec.finish_recording)
- return reprec
-
- def chdir(self) -> None:
- """Cd into the temporary directory.
-
- This is done automatically upon instantiation.
- """
- os.chdir(self.path)
-
- def _makefile(
- self,
- ext: str,
- lines: Sequence[Union[Any, bytes]],
- files: Dict[str, str],
- encoding: str = "utf-8",
- ) -> Path:
- items = list(files.items())
-
- if ext and not ext.startswith("."):
- raise ValueError(
- f"pytester.makefile expects a file extension, try .{ext} instead of {ext}"
- )
-
- def to_text(s: Union[Any, bytes]) -> str:
- return s.decode(encoding) if isinstance(s, bytes) else str(s)
-
- if lines:
- source = "\n".join(to_text(x) for x in lines)
- basename = self._name
- items.insert(0, (basename, source))
-
- ret = None
- for basename, value in items:
- p = self.path.joinpath(basename).with_suffix(ext)
- p.parent.mkdir(parents=True, exist_ok=True)
- source_ = Source(value)
- source = "\n".join(to_text(line) for line in source_.lines)
- p.write_text(source.strip(), encoding=encoding)
- if ret is None:
- ret = p
- assert ret is not None
- return ret
-
- def makefile(self, ext: str, *args: str, **kwargs: str) -> Path:
- r"""Create new text file(s) in the test directory.
-
- :param str ext:
- The extension the file(s) should use, including the dot, e.g. `.py`.
- :param args:
- All args are treated as strings and joined using newlines.
- The result is written as contents to the file. The name of the
- file is based on the test function requesting this fixture.
- :param kwargs:
- Each keyword is the name of a file, while the value of it will
- be written as contents of the file.
-
- Examples:
-
- .. code-block:: python
-
- pytester.makefile(".txt", "line1", "line2")
-
- pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
-
- To create binary files, use :meth:`pathlib.Path.write_bytes` directly:
-
- .. code-block:: python
-
- filename = pytester.path.joinpath("foo.bin")
- filename.write_bytes(b"...")
- """
- return self._makefile(ext, args, kwargs)
-
- def makeconftest(self, source: str) -> Path:
-        """Write a conftest.py file with 'source' as contents."""
- return self.makepyfile(conftest=source)
-
- def makeini(self, source: str) -> Path:
- """Write a tox.ini file with 'source' as contents."""
- return self.makefile(".ini", tox=source)
-
- def getinicfg(self, source: str) -> SectionWrapper:
- """Return the pytest section from the tox.ini config file."""
- p = self.makeini(source)
- return IniConfig(str(p))["pytest"]
-
- def makepyprojecttoml(self, source: str) -> Path:
- """Write a pyproject.toml file with 'source' as contents.
-
- .. versionadded:: 6.0
- """
- return self.makefile(".toml", pyproject=source)
-
- def makepyfile(self, *args, **kwargs) -> Path:
- r"""Shortcut for .makefile() with a .py extension.
-
-        Defaults to the test name with a '.py' extension, e.g. test_foobar.py, overwriting
- existing files.
-
- Examples:
-
- .. code-block:: python
-
- def test_something(pytester):
- # Initial file is created test_something.py.
- pytester.makepyfile("foobar")
- # To create multiple files, pass kwargs accordingly.
- pytester.makepyfile(custom="foobar")
- # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
-
- """
- return self._makefile(".py", args, kwargs)
-
- def maketxtfile(self, *args, **kwargs) -> Path:
- r"""Shortcut for .makefile() with a .txt extension.
-
-        Defaults to the test name with a '.txt' extension, e.g. test_foobar.txt, overwriting
- existing files.
-
- Examples:
-
- .. code-block:: python
-
- def test_something(pytester):
- # Initial file is created test_something.txt.
- pytester.maketxtfile("foobar")
- # To create multiple files, pass kwargs accordingly.
- pytester.maketxtfile(custom="foobar")
- # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
-
- """
- return self._makefile(".txt", args, kwargs)
-
- def syspathinsert(
- self, path: Optional[Union[str, "os.PathLike[str]"]] = None
- ) -> None:
- """Prepend a directory to sys.path, defaults to :attr:`path`.
-
- This is undone automatically when this object dies at the end of each
- test.
- """
- if path is None:
- path = self.path
-
- self._monkeypatch.syspath_prepend(str(path))
-
- def mkdir(self, name: str) -> Path:
- """Create a new (sub)directory."""
- p = self.path / name
- p.mkdir()
- return p
-
- def mkpydir(self, name: str) -> Path:
- """Create a new python package.
-
- This creates a (sub)directory with an empty ``__init__.py`` file so it
- gets recognised as a Python package.
- """
- p = self.path / name
- p.mkdir()
- p.joinpath("__init__.py").touch()
- return p
-
- def copy_example(self, name: Optional[str] = None) -> Path:
-        """Copy a file or directory from the project's directory into the testdir.
-
-        :param str name: The name of the file or directory to copy.
-        :return: Path to the copied file or directory (inside ``self.path``).
-
- """
- example_dir = self._request.config.getini("pytester_example_dir")
- if example_dir is None:
- raise ValueError("pytester_example_dir is unset, can't copy examples")
- example_dir = self._request.config.rootpath / example_dir
-
- for extra_element in self._request.node.iter_markers("pytester_example_path"):
- assert extra_element.args
- example_dir = example_dir.joinpath(*extra_element.args)
-
- if name is None:
- func_name = self._name
- maybe_dir = example_dir / func_name
- maybe_file = example_dir / (func_name + ".py")
-
- if maybe_dir.is_dir():
- example_path = maybe_dir
- elif maybe_file.is_file():
- example_path = maybe_file
- else:
- raise LookupError(
- f"{func_name} can't be found as module or package in {example_dir}"
- )
- else:
- example_path = example_dir.joinpath(name)
-
- if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file():
- copytree(example_path, self.path)
- return self.path
- elif example_path.is_file():
- result = self.path.joinpath(example_path.name)
- shutil.copy(example_path, result)
- return result
- else:
- raise LookupError(
- f'example "{example_path}" is not found as a file or directory'
- )
-
- def getnode(
- self, config: Config, arg: Union[str, "os.PathLike[str]"]
- ) -> Optional[Union[Collector, Item]]:
- """Return the collection node of a file.
-
- :param pytest.Config config:
- A pytest config.
- See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it.
- :param os.PathLike[str] arg:
- Path to the file.
- """
- session = Session.from_config(config)
- assert "::" not in str(arg)
- p = Path(os.path.abspath(arg))
- config.hook.pytest_sessionstart(session=session)
- res = session.perform_collect([str(p)], genitems=False)[0]
- config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
- return res
-
- def getpathnode(self, path: Union[str, "os.PathLike[str]"]):
- """Return the collection node of a file.
-
- This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
- create the (configured) pytest Config instance.
-
- :param os.PathLike[str] path: Path to the file.
- """
- path = Path(path)
- config = self.parseconfigure(path)
- session = Session.from_config(config)
- x = bestrelpath(session.path, path)
- config.hook.pytest_sessionstart(session=session)
- res = session.perform_collect([x], genitems=False)[0]
- config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
- return res
-
- def genitems(self, colitems: Sequence[Union[Item, Collector]]) -> List[Item]:
- """Generate all test items from a collection node.
-
- This recurses into the collection node and returns a list of all the
- test items contained within.
- """
- session = colitems[0].session
- result: List[Item] = []
- for colitem in colitems:
- result.extend(session.genitems(colitem))
- return result
-
- def runitem(self, source: str) -> Any:
- """Run the "test_func" Item.
-
- The calling test instance (class containing the test method) must
- provide a ``.getrunner()`` method which should return a runner which
- can run the test protocol for a single item, e.g.
- :py:func:`_pytest.runner.runtestprotocol`.
- """
- # used from runner functional tests
- item = self.getitem(source)
- # the test class where we are called from wants to provide the runner
- testclassinstance = self._request.instance
- runner = testclassinstance.getrunner()
- return runner(item)
-
- def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder:
- """Run a test module in process using ``pytest.main()``.
-
- This run writes "source" into a temporary file and runs
- ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
- for the result.
-
- :param source: The source code of the test module.
- :param cmdlineargs: Any extra command line arguments to use.
- """
- p = self.makepyfile(source)
- values = list(cmdlineargs) + [p]
- return self.inline_run(*values)
-
- def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]:
- """Run ``pytest.main(['--collectonly'])`` in-process.
-
- Runs the :py:func:`pytest.main` function to run all of pytest inside
- the test process itself like :py:meth:`inline_run`, but returns a
- tuple of the collected items and a :py:class:`HookRecorder` instance.
- """
- rec = self.inline_run("--collect-only", *args)
- items = [x.item for x in rec.getcalls("pytest_itemcollected")]
- return items, rec
-
- def inline_run(
- self,
- *args: Union[str, "os.PathLike[str]"],
- plugins=(),
- no_reraise_ctrlc: bool = False,
- ) -> HookRecorder:
- """Run ``pytest.main()`` in-process, returning a HookRecorder.
-
- Runs the :py:func:`pytest.main` function to run all of pytest inside
- the test process itself. This means it can return a
- :py:class:`HookRecorder` instance which gives more detailed results
- from that run than can be done by matching stdout/stderr from
- :py:meth:`runpytest`.
-
- :param args:
- Command line arguments to pass to :py:func:`pytest.main`.
- :param plugins:
- Extra plugin instances the ``pytest.main()`` instance should use.
- :param no_reraise_ctrlc:
- Typically we reraise keyboard interrupts from the child run. If
- True, the KeyboardInterrupt exception is captured.
- """
- # (maybe a cpython bug?) the importlib cache sometimes isn't updated
- # properly between file creation and inline_run (especially if imports
- # are interspersed with file creation)
- importlib.invalidate_caches()
-
- plugins = list(plugins)
- finalizers = []
- try:
- # Any sys.module or sys.path changes done while running pytest
- # inline should be reverted after the test run completes to avoid
- # clashing with later inline tests run within the same pytest test,
- # e.g. just because they use matching test module names.
- finalizers.append(self.__take_sys_modules_snapshot().restore)
- finalizers.append(SysPathsSnapshot().restore)
-
- # Important note:
- # - our tests should not leave any other references/registrations
- # laying around other than possibly loaded test modules
- # referenced from sys.modules, as nothing will clean those up
- # automatically
-
- rec = []
-
- class Collect:
- def pytest_configure(x, config: Config) -> None:
- rec.append(self.make_hook_recorder(config.pluginmanager))
-
- plugins.append(Collect())
- ret = main([str(x) for x in args], plugins=plugins)
- if len(rec) == 1:
- reprec = rec.pop()
- else:
-
- class reprec: # type: ignore
- pass
-
- reprec.ret = ret
-
- # Typically we reraise keyboard interrupts from the child run
- # because it's our user requesting interruption of the testing.
- if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
- calls = reprec.getcalls("pytest_keyboard_interrupt")
- if calls and calls[-1].excinfo.type == KeyboardInterrupt:
- raise KeyboardInterrupt()
- return reprec
- finally:
- for finalizer in finalizers:
- finalizer()
-
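# --- Editorial example (not part of the original module) --------------------
# A minimal sketch of how inline_run() is typically used from a test that
# requests the `pytester` fixture; the test name and generated file contents
# are hypothetical.
def test_inline_run_example(pytester):
    pytester.makepyfile(
        """
        def test_ok():
            assert 1 + 1 == 2
        """
    )
    rec = pytester.inline_run("-q")  # runs pytest.main() in-process
    rec.assertoutcome(passed=1)      # HookRecorder summarizes the reports
# -----------------------------------------------------------------------------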
- def runpytest_inprocess(
- self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any
- ) -> RunResult:
- """Return result of running pytest in-process, providing a similar
- interface to what self.runpytest() provides."""
- syspathinsert = kwargs.pop("syspathinsert", False)
-
- if syspathinsert:
- self.syspathinsert()
- now = timing.time()
- capture = _get_multicapture("sys")
- capture.start_capturing()
- try:
- try:
- reprec = self.inline_run(*args, **kwargs)
- except SystemExit as e:
- ret = e.args[0]
- try:
- ret = ExitCode(e.args[0])
- except ValueError:
- pass
-
- class reprec: # type: ignore
- ret = ret
-
- except Exception:
- traceback.print_exc()
-
- class reprec: # type: ignore
- ret = ExitCode(3)
-
- finally:
- out, err = capture.readouterr()
- capture.stop_capturing()
- sys.stdout.write(out)
- sys.stderr.write(err)
-
- assert reprec.ret is not None
- res = RunResult(
- reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now
- )
- res.reprec = reprec # type: ignore
- return res
-
- def runpytest(
- self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any
- ) -> RunResult:
- """Run pytest inline or in a subprocess, depending on the command line
- option "--runpytest" and return a :py:class:`~pytest.RunResult`."""
- new_args = self._ensure_basetemp(args)
- if self._method == "inprocess":
- return self.runpytest_inprocess(*new_args, **kwargs)
- elif self._method == "subprocess":
- return self.runpytest_subprocess(*new_args, **kwargs)
- raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
-
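# --- Editorial example (not part of the original module) --------------------
# Sketch of the usual end-to-end flow through runpytest(); whether it runs
# in-process or in a subprocess depends on the --runpytest option described
# above. The generated file and match pattern are illustrative.
def test_runpytest_example(pytester):
    pytester.makepyfile("def test_fails(): assert False")
    result = pytester.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(["*1 failed*"])
# -----------------------------------------------------------------------------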
- def _ensure_basetemp(
- self, args: Sequence[Union[str, "os.PathLike[str]"]]
- ) -> List[Union[str, "os.PathLike[str]"]]:
- new_args = list(args)
- for x in new_args:
- if str(x).startswith("--basetemp"):
- break
- else:
- new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp"))
- return new_args
-
- def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config:
- """Return a new pytest Config instance from given commandline args.
-
- This invokes the pytest bootstrapping code in _pytest.config to create
-        a new :py:class:`pytest.PytestPluginManager` and call the
- pytest_cmdline_parse hook to create a new
- :py:class:`pytest.Config` instance.
-
- If :py:attr:`plugins` has been populated they should be plugin modules
- to be registered with the PluginManager.
- """
- import _pytest.config
-
- new_args = self._ensure_basetemp(args)
- new_args = [str(x) for x in new_args]
-
- config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type]
- # we don't know what the test will do with this half-setup config
- # object and thus we make sure it gets unconfigured properly in any
- # case (otherwise capturing could still be active, for example)
- self._request.addfinalizer(config._ensure_unconfigure)
- return config
-
- def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config:
- """Return a new pytest configured Config instance.
-
- Returns a new :py:class:`pytest.Config` instance like
- :py:meth:`parseconfig`, but also calls the pytest_configure hook.
- """
- config = self.parseconfig(*args)
- config._do_configure()
- return config
-
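# --- Editorial example (not part of the original module) --------------------
# Sketch showing parseconfig()/parseconfigure() used to inspect a Config built
# from command line arguments; the "-v" flag is just an example.
def test_parseconfig_example(pytester):
    config = pytester.parseconfig("-v")
    assert config.option.verbose == 1
    configured = pytester.parseconfigure("-v")  # additionally runs pytest_configure
    assert configured.option.verbose == 1
# -----------------------------------------------------------------------------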
- def getitem(
- self, source: Union[str, "os.PathLike[str]"], funcname: str = "test_func"
- ) -> Item:
- """Return the test item for a test function.
-
- Writes the source to a python file and runs pytest's collection on
- the resulting module, returning the test item for the requested
- function name.
-
- :param source:
- The module source.
- :param funcname:
- The name of the test function for which to return a test item.
- """
- items = self.getitems(source)
- for item in items:
- if item.name == funcname:
- return item
- assert 0, "{!r} item not found in module:\n{}\nitems: {}".format(
- funcname, source, items
- )
-
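# --- Editorial example (not part of the original module) --------------------
# Sketch: getitem() collects inline source and returns the item named
# "test_func" by default; the module body is illustrative.
def test_getitem_example(pytester):
    item = pytester.getitem(
        """
        def test_func():
            pass
        """
    )
    assert item.name == "test_func"
# -----------------------------------------------------------------------------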
- def getitems(self, source: Union[str, "os.PathLike[str]"]) -> List[Item]:
- """Return all test items collected from the module.
-
- Writes the source to a Python file and runs pytest's collection on
- the resulting module, returning all test items contained within.
- """
- modcol = self.getmodulecol(source)
- return self.genitems([modcol])
-
- def getmodulecol(
- self,
- source: Union[str, "os.PathLike[str]"],
- configargs=(),
- *,
- withinit: bool = False,
- ):
- """Return the module collection node for ``source``.
-
- Writes ``source`` to a file using :py:meth:`makepyfile` and then
- runs the pytest collection on it, returning the collection node for the
- test module.
-
- :param source:
- The source code of the module to collect.
-
- :param configargs:
- Any extra arguments to pass to :py:meth:`parseconfigure`.
-
- :param withinit:
- Whether to also write an ``__init__.py`` file to the same
- directory to ensure it is a package.
- """
- if isinstance(source, os.PathLike):
- path = self.path.joinpath(source)
- assert not withinit, "not supported for paths"
- else:
- kw = {self._name: str(source)}
- path = self.makepyfile(**kw)
- if withinit:
- self.makepyfile(__init__="#")
- self.config = config = self.parseconfigure(path, *configargs)
- return self.getnode(config, path)
-
- def collect_by_name(
- self, modcol: Collector, name: str
- ) -> Optional[Union[Item, Collector]]:
- """Return the collection node for name from the module collection.
-
- Searches a module collection node for a collection node matching the
- given name.
-
- :param modcol: A module collection node; see :py:meth:`getmodulecol`.
- :param name: The name of the node to return.
- """
- if modcol not in self._mod_collections:
- self._mod_collections[modcol] = list(modcol.collect())
- for colitem in self._mod_collections[modcol]:
- if colitem.name == name:
- return colitem
- return None
-
- def popen(
- self,
- cmdargs: Sequence[Union[str, "os.PathLike[str]"]],
- stdout: Union[int, TextIO] = subprocess.PIPE,
- stderr: Union[int, TextIO] = subprocess.PIPE,
- stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN,
- **kw,
- ):
- """Invoke :py:class:`subprocess.Popen`.
-
- Calls :py:class:`subprocess.Popen` making sure the current working
- directory is in ``PYTHONPATH``.
-
- You probably want to use :py:meth:`run` instead.
- """
- env = os.environ.copy()
- env["PYTHONPATH"] = os.pathsep.join(
- filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
- )
- kw["env"] = env
-
- if stdin is self.CLOSE_STDIN:
- kw["stdin"] = subprocess.PIPE
- elif isinstance(stdin, bytes):
- kw["stdin"] = subprocess.PIPE
- else:
- kw["stdin"] = stdin
-
- popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
- if stdin is self.CLOSE_STDIN:
- assert popen.stdin is not None
- popen.stdin.close()
- elif isinstance(stdin, bytes):
- assert popen.stdin is not None
- popen.stdin.write(stdin)
-
- return popen
-
- def run(
- self,
- *cmdargs: Union[str, "os.PathLike[str]"],
- timeout: Optional[float] = None,
- stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN,
- ) -> RunResult:
- """Run a command with arguments.
-
- Run a process using :py:class:`subprocess.Popen` saving the stdout and
- stderr.
-
- :param cmdargs:
- The sequence of arguments to pass to :py:class:`subprocess.Popen`,
- with path-like objects being converted to :py:class:`str`
- automatically.
- :param timeout:
- The period in seconds after which to timeout and raise
- :py:class:`Pytester.TimeoutExpired`.
- :param stdin:
- Optional standard input.
-
- - If it is :py:attr:`CLOSE_STDIN` (Default), then this method calls
- :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and
- the standard input is closed immediately after the new command is
- started.
-
- - If it is of type :py:class:`bytes`, these bytes are sent to the
- standard input of the command.
-
- - Otherwise, it is passed through to :py:class:`subprocess.Popen`.
-              For further information in this case, consult the documentation
-              of the ``stdin`` parameter in :py:class:`subprocess.Popen`.
- """
- __tracebackhide__ = True
-
- cmdargs = tuple(os.fspath(arg) for arg in cmdargs)
- p1 = self.path.joinpath("stdout")
- p2 = self.path.joinpath("stderr")
- print("running:", *cmdargs)
- print(" in:", Path.cwd())
-
- with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2:
- now = timing.time()
- popen = self.popen(
- cmdargs,
- stdin=stdin,
- stdout=f1,
- stderr=f2,
- close_fds=(sys.platform != "win32"),
- )
- if popen.stdin is not None:
- popen.stdin.close()
-
- def handle_timeout() -> None:
- __tracebackhide__ = True
-
- timeout_message = (
- "{seconds} second timeout expired running:"
- " {command}".format(seconds=timeout, command=cmdargs)
- )
-
- popen.kill()
- popen.wait()
- raise self.TimeoutExpired(timeout_message)
-
- if timeout is None:
- ret = popen.wait()
- else:
- try:
- ret = popen.wait(timeout)
- except subprocess.TimeoutExpired:
- handle_timeout()
-
- with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2:
- out = f1.read().splitlines()
- err = f2.read().splitlines()
-
- self._dump_lines(out, sys.stdout)
- self._dump_lines(err, sys.stderr)
-
- with contextlib.suppress(ValueError):
- ret = ExitCode(ret)
- return RunResult(ret, out, err, timing.time() - now)
-
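# --- Editorial example (not part of the original module) --------------------
# Sketch of run() with bytes fed to stdin; sys.executable keeps the command
# portable and the asserted output is what the one-liner prints.
import sys

def test_run_stdin_example(pytester):
    result = pytester.run(
        sys.executable, "-c", "import sys; print(sys.stdin.read())",
        stdin=b"hello",
    )
    assert result.ret == 0
    assert "hello" in result.outlines
# -----------------------------------------------------------------------------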
- def _dump_lines(self, lines, fp):
- try:
- for line in lines:
- print(line, file=fp)
- except UnicodeEncodeError:
- print(f"couldn't print to {fp} because of encoding")
-
- def _getpytestargs(self) -> Tuple[str, ...]:
- return sys.executable, "-mpytest"
-
- def runpython(self, script: "os.PathLike[str]") -> RunResult:
- """Run a python script using sys.executable as interpreter."""
- return self.run(sys.executable, script)
-
- def runpython_c(self, command: str) -> RunResult:
- """Run ``python -c "command"``."""
- return self.run(sys.executable, "-c", command)
-
- def runpytest_subprocess(
- self, *args: Union[str, "os.PathLike[str]"], timeout: Optional[float] = None
- ) -> RunResult:
- """Run pytest as a subprocess with given arguments.
-
- Any plugins added to the :py:attr:`plugins` list will be added using the
- ``-p`` command line option. Additionally ``--basetemp`` is used to put
- any temporary files and directories in a numbered directory prefixed
- with "runpytest-" to not conflict with the normal numbered pytest
- location for temporary files and directories.
-
- :param args:
- The sequence of arguments to pass to the pytest subprocess.
- :param timeout:
- The period in seconds after which to timeout and raise
- :py:class:`Pytester.TimeoutExpired`.
- """
- __tracebackhide__ = True
- p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700)
- args = ("--basetemp=%s" % p,) + args
- plugins = [x for x in self.plugins if isinstance(x, str)]
- if plugins:
- args = ("-p", plugins[0]) + args
- args = self._getpytestargs() + args
- return self.run(*args, timeout=timeout)
-
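# --- Editorial example (not part of the original module) --------------------
# Sketch: running pytest in a fresh subprocess isolates the run from the
# current interpreter's state; the timeout value is illustrative.
def test_runpytest_subprocess_example(pytester):
    pytester.makepyfile("def test_ok(): pass")
    result = pytester.runpytest_subprocess("-q", timeout=60)
    result.assert_outcomes(passed=1)
# -----------------------------------------------------------------------------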
- def spawn_pytest(
- self, string: str, expect_timeout: float = 10.0
- ) -> "pexpect.spawn":
- """Run pytest using pexpect.
-
- This makes sure to use the right pytest and sets up the temporary
- directory locations.
-
- The pexpect child is returned.
- """
- basetemp = self.path / "temp-pexpect"
- basetemp.mkdir(mode=0o700)
- invoke = " ".join(map(str, self._getpytestargs()))
- cmd = f"{invoke} --basetemp={basetemp} {string}"
- return self.spawn(cmd, expect_timeout=expect_timeout)
-
- def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
- """Run a command using pexpect.
-
- The pexpect child is returned.
- """
- pexpect = importorskip("pexpect", "3.0")
- if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
- skip("pypy-64 bit not supported")
- if not hasattr(pexpect, "spawn"):
- skip("pexpect.spawn not available")
- logfile = self.path.joinpath("spawn.out").open("wb")
-
- child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout)
- self._request.addfinalizer(logfile.close)
- return child
-
-
-class LineComp:
- def __init__(self) -> None:
- self.stringio = StringIO()
- """:class:`python:io.StringIO()` instance used for input."""
-
- def assert_contains_lines(self, lines2: Sequence[str]) -> None:
- """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value.
-
- Lines are matched using :func:`LineMatcher.fnmatch_lines <pytest.LineMatcher.fnmatch_lines>`.
- """
- __tracebackhide__ = True
- val = self.stringio.getvalue()
- self.stringio.truncate(0)
- self.stringio.seek(0)
- lines1 = val.split("\n")
- LineMatcher(lines1).fnmatch_lines(lines2)
-
-
-class LineMatcher:
- """Flexible matching of text.
-
- This is a convenience class to test large texts like the output of
- commands.
-
- The constructor takes a list of lines without their trailing newlines, i.e.
- ``text.splitlines()``.
- """
-
- def __init__(self, lines: List[str]) -> None:
- self.lines = lines
- self._log_output: List[str] = []
-
- def __str__(self) -> str:
- """Return the entire original text.
-
- .. versionadded:: 6.2
- You can use :meth:`str` in older versions.
- """
- return "\n".join(self.lines)
-
- def _getlines(self, lines2: Union[str, Sequence[str], Source]) -> Sequence[str]:
- if isinstance(lines2, str):
- lines2 = Source(lines2)
- if isinstance(lines2, Source):
- lines2 = lines2.strip().lines
- return lines2
-
- def fnmatch_lines_random(self, lines2: Sequence[str]) -> None:
- """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`)."""
- __tracebackhide__ = True
- self._match_lines_random(lines2, fnmatch)
-
- def re_match_lines_random(self, lines2: Sequence[str]) -> None:
- """Check lines exist in the output in any order (using :func:`python:re.match`)."""
- __tracebackhide__ = True
- self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))
-
- def _match_lines_random(
- self, lines2: Sequence[str], match_func: Callable[[str, str], bool]
- ) -> None:
- __tracebackhide__ = True
- lines2 = self._getlines(lines2)
- for line in lines2:
- for x in self.lines:
- if line == x or match_func(x, line):
- self._log("matched: ", repr(line))
- break
- else:
- msg = "line %r not found in output" % line
- self._log(msg)
- self._fail(msg)
-
- def get_lines_after(self, fnline: str) -> Sequence[str]:
- """Return all lines following the given line in the text.
-
- The given line can contain glob wildcards.
- """
- for i, line in enumerate(self.lines):
- if fnline == line or fnmatch(line, fnline):
- return self.lines[i + 1 :]
- raise ValueError("line %r not found in output" % fnline)
-
- def _log(self, *args) -> None:
- self._log_output.append(" ".join(str(x) for x in args))
-
- @property
- def _log_text(self) -> str:
- return "\n".join(self._log_output)
-
- def fnmatch_lines(
- self, lines2: Sequence[str], *, consecutive: bool = False
- ) -> None:
- """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`).
-
- The argument is a list of lines which have to match and can use glob
-        wildcards.  If they do not match, pytest.fail() is called.  The
- matches and non-matches are also shown as part of the error message.
-
- :param lines2: String patterns to match.
- :param consecutive: Match lines consecutively?
- """
- __tracebackhide__ = True
- self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
-
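# --- Editorial example (not part of the original module) --------------------
# Sketch: LineMatcher can also be used on its own, independent of the
# pytester fixture (it is additionally exposed as pytest.LineMatcher); the
# sample lines are made up.
from _pytest.pytester import LineMatcher

matcher = LineMatcher(["collected 2 items", "2 passed in 0.01s"])
matcher.fnmatch_lines(["collected * items", "* passed *"])
matcher.no_fnmatch_line("*failed*")
# -----------------------------------------------------------------------------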
- def re_match_lines(
- self, lines2: Sequence[str], *, consecutive: bool = False
- ) -> None:
- """Check lines exist in the output (using :func:`python:re.match`).
-
- The argument is a list of lines which have to match using ``re.match``.
-        If they do not match, pytest.fail() is called.
-
- The matches and non-matches are also shown as part of the error message.
-
- :param lines2: string patterns to match.
- :param consecutive: match lines consecutively?
- """
- __tracebackhide__ = True
- self._match_lines(
- lines2,
- lambda name, pat: bool(re.match(pat, name)),
- "re.match",
- consecutive=consecutive,
- )
-
- def _match_lines(
- self,
- lines2: Sequence[str],
- match_func: Callable[[str, str], bool],
- match_nickname: str,
- *,
- consecutive: bool = False,
- ) -> None:
- """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
-
- :param Sequence[str] lines2:
- List of string patterns to match. The actual format depends on
- ``match_func``.
- :param match_func:
- A callable ``match_func(line, pattern)`` where line is the
- captured line from stdout/stderr and pattern is the matching
- pattern.
- :param str match_nickname:
- The nickname for the match function that will be logged to stdout
- when a match occurs.
- :param consecutive:
- Match lines consecutively?
- """
- if not isinstance(lines2, collections.abc.Sequence):
- raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
- lines2 = self._getlines(lines2)
- lines1 = self.lines[:]
- extralines = []
- __tracebackhide__ = True
- wnick = len(match_nickname) + 1
- started = False
- for line in lines2:
- nomatchprinted = False
- while lines1:
- nextline = lines1.pop(0)
- if line == nextline:
- self._log("exact match:", repr(line))
- started = True
- break
- elif match_func(nextline, line):
- self._log("%s:" % match_nickname, repr(line))
- self._log(
- "{:>{width}}".format("with:", width=wnick), repr(nextline)
- )
- started = True
- break
- else:
- if consecutive and started:
- msg = f"no consecutive match: {line!r}"
- self._log(msg)
- self._log(
- "{:>{width}}".format("with:", width=wnick), repr(nextline)
- )
- self._fail(msg)
- if not nomatchprinted:
- self._log(
- "{:>{width}}".format("nomatch:", width=wnick), repr(line)
- )
- nomatchprinted = True
- self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
- extralines.append(nextline)
- else:
- msg = f"remains unmatched: {line!r}"
- self._log(msg)
- self._fail(msg)
- self._log_output = []
-
- def no_fnmatch_line(self, pat: str) -> None:
- """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
-
- :param str pat: The pattern to match lines.
- """
- __tracebackhide__ = True
- self._no_match_line(pat, fnmatch, "fnmatch")
-
- def no_re_match_line(self, pat: str) -> None:
- """Ensure captured lines do not match the given pattern, using ``re.match``.
-
- :param str pat: The regular expression to match lines.
- """
- __tracebackhide__ = True
- self._no_match_line(
- pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
- )
-
- def _no_match_line(
- self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
- ) -> None:
-        """Ensure that no captured line matches the given pattern, using ``match_func``.
-
- :param str pat: The pattern to match lines.
- """
- __tracebackhide__ = True
- nomatch_printed = False
- wnick = len(match_nickname) + 1
- for line in self.lines:
- if match_func(line, pat):
- msg = f"{match_nickname}: {pat!r}"
- self._log(msg)
- self._log("{:>{width}}".format("with:", width=wnick), repr(line))
- self._fail(msg)
- else:
- if not nomatch_printed:
- self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat))
- nomatch_printed = True
- self._log("{:>{width}}".format("and:", width=wnick), repr(line))
- self._log_output = []
-
- def _fail(self, msg: str) -> None:
- __tracebackhide__ = True
- log_text = self._log_text
- self._log_output = []
- fail(log_text)
-
- def str(self) -> str:
- """Return the entire original text."""
- return str(self)
diff --git a/contrib/python/pytest/py3/_pytest/pytester_assertions.py b/contrib/python/pytest/py3/_pytest/pytester_assertions.py
deleted file mode 100644
index 657e4db5fc..0000000000
--- a/contrib/python/pytest/py3/_pytest/pytester_assertions.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""Helper plugin for pytester; should not be loaded on its own."""
-# This plugin contains assertions used by pytester. pytester cannot
-# contain them itself, since it is imported by the `pytest` module,
-# hence cannot be subject to assertion rewriting, which requires a
-# module to not be already imported.
-from typing import Dict
-from typing import Optional
-from typing import Sequence
-from typing import Tuple
-from typing import Union
-
-from _pytest.reports import CollectReport
-from _pytest.reports import TestReport
-
-
-def assertoutcome(
- outcomes: Tuple[
- Sequence[TestReport],
- Sequence[Union[CollectReport, TestReport]],
- Sequence[Union[CollectReport, TestReport]],
- ],
- passed: int = 0,
- skipped: int = 0,
- failed: int = 0,
-) -> None:
- __tracebackhide__ = True
-
- realpassed, realskipped, realfailed = outcomes
- obtained = {
- "passed": len(realpassed),
- "skipped": len(realskipped),
- "failed": len(realfailed),
- }
- expected = {"passed": passed, "skipped": skipped, "failed": failed}
- assert obtained == expected, outcomes
-
-
-def assert_outcomes(
- outcomes: Dict[str, int],
- passed: int = 0,
- skipped: int = 0,
- failed: int = 0,
- errors: int = 0,
- xpassed: int = 0,
- xfailed: int = 0,
- warnings: Optional[int] = None,
- deselected: Optional[int] = None,
-) -> None:
- """Assert that the specified outcomes appear with the respective
- numbers (0 means it didn't occur) in the text output from a test run."""
- __tracebackhide__ = True
-
- obtained = {
- "passed": outcomes.get("passed", 0),
- "skipped": outcomes.get("skipped", 0),
- "failed": outcomes.get("failed", 0),
- "errors": outcomes.get("errors", 0),
- "xpassed": outcomes.get("xpassed", 0),
- "xfailed": outcomes.get("xfailed", 0),
- }
- expected = {
- "passed": passed,
- "skipped": skipped,
- "failed": failed,
- "errors": errors,
- "xpassed": xpassed,
- "xfailed": xfailed,
- }
- if warnings is not None:
- obtained["warnings"] = outcomes.get("warnings", 0)
- expected["warnings"] = warnings
- if deselected is not None:
- obtained["deselected"] = outcomes.get("deselected", 0)
- expected["deselected"] = deselected
- assert obtained == expected
diff --git a/contrib/python/pytest/py3/_pytest/python.py b/contrib/python/pytest/py3/_pytest/python.py
deleted file mode 100644
index 2855880a4e..0000000000
--- a/contrib/python/pytest/py3/_pytest/python.py
+++ /dev/null
@@ -1,1820 +0,0 @@
-"""Python test discovery, setup and run of test functions."""
-import enum
-import fnmatch
-import inspect
-import itertools
-import os
-import sys
-import types
-import warnings
-from collections import Counter
-from collections import defaultdict
-from functools import partial
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import Dict
-from typing import Generator
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Pattern
-from typing import Sequence
-from typing import Set
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-
-import _pytest
-from _pytest import fixtures
-from _pytest import nodes
-from _pytest._code import filter_traceback
-from _pytest._code import getfslineno
-from _pytest._code.code import ExceptionInfo
-from _pytest._code.code import TerminalRepr
-from _pytest._io import TerminalWriter
-from _pytest._io.saferepr import saferepr
-from _pytest.compat import ascii_escaped
-from _pytest.compat import assert_never
-from _pytest.compat import final
-from _pytest.compat import get_default_arg_names
-from _pytest.compat import get_real_func
-from _pytest.compat import getimfunc
-from _pytest.compat import getlocation
-from _pytest.compat import is_async_function
-from _pytest.compat import is_generator
-from _pytest.compat import LEGACY_PATH
-from _pytest.compat import NOTSET
-from _pytest.compat import safe_getattr
-from _pytest.compat import safe_isclass
-from _pytest.compat import STRING_TYPES
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH
-from _pytest.deprecated import INSTANCE_COLLECTOR
-from _pytest.fixtures import FuncFixtureInfo
-from _pytest.main import Session
-from _pytest.mark import MARK_GEN
-from _pytest.mark import ParameterSet
-from _pytest.mark.structures import get_unpacked_marks
-from _pytest.mark.structures import Mark
-from _pytest.mark.structures import MarkDecorator
-from _pytest.mark.structures import normalize_mark_list
-from _pytest.outcomes import fail
-from _pytest.outcomes import skip
-from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import fnmatch_ex
-from _pytest.pathlib import import_path
-from _pytest.pathlib import ImportPathMismatchError
-from _pytest.pathlib import parts
-from _pytest.pathlib import visit
-from _pytest.scope import Scope
-from _pytest.warning_types import PytestCollectionWarning
-from _pytest.warning_types import PytestUnhandledCoroutineWarning
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
- from _pytest.scope import _ScopeName
-
-
-_PYTEST_DIR = Path(_pytest.__file__).parent
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group.addoption(
- "--fixtures",
- "--funcargs",
- action="store_true",
- dest="showfixtures",
- default=False,
- help="show available fixtures, sorted by plugin appearance "
- "(fixtures with leading '_' are only shown with '-v')",
- )
- group.addoption(
- "--fixtures-per-test",
- action="store_true",
- dest="show_fixtures_per_test",
- default=False,
- help="show fixtures per test",
- )
- parser.addini(
- "python_files",
- type="args",
- # NOTE: default is also used in AssertionRewritingHook.
- default=["test_*.py", "*_test.py"],
- help="glob-style file patterns for Python test module discovery",
- )
- parser.addini(
- "python_classes",
- type="args",
- default=["Test"],
- help="prefixes or glob names for Python test class discovery",
- )
- parser.addini(
- "python_functions",
- type="args",
- default=["test"],
- help="prefixes or glob names for Python test function and method discovery",
- )
- parser.addini(
- "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
- type="bool",
- default=False,
-        help="disable escaping of non-ASCII characters in string test IDs, "
-        "might cause unwanted side effects (use at your own risk)",
- )
-
-
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- if config.option.showfixtures:
- showfixtures(config)
- return 0
- if config.option.show_fixtures_per_test:
- show_fixtures_per_test(config)
- return 0
- return None
-
-
-def pytest_generate_tests(metafunc: "Metafunc") -> None:
- for marker in metafunc.definition.iter_markers(name="parametrize"):
- metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker)
-
-
-def pytest_configure(config: Config) -> None:
- config.addinivalue_line(
- "markers",
- "parametrize(argnames, argvalues): call a test function multiple "
- "times passing in different arguments in turn. argvalues generally "
- "needs to be a list of values if argnames specifies only one name "
- "or a list of tuples of values if argnames specifies multiple names. "
- "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
-        "decorated test function, one with arg1=1 and another with arg1=2. "
-        "See https://docs.pytest.org/en/stable/how-to/parametrize.html for more info "
- "and examples.",
- )
- config.addinivalue_line(
- "markers",
- "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
-        "all of the specified fixtures. See "
- "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ",
- )
-
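# --- Editorial example (not part of the original module) --------------------
# Sketch of the parametrize marker registered above: two test invocations are
# generated, one per value of arg1.
import pytest

@pytest.mark.parametrize("arg1", [1, 2])
def test_parametrize_example(arg1):
    assert arg1 in (1, 2)
# -----------------------------------------------------------------------------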
-
-def async_warn_and_skip(nodeid: str) -> None:
- msg = "async def functions are not natively supported and have been skipped.\n"
- msg += (
- "You need to install a suitable plugin for your async framework, for example:\n"
- )
- msg += " - anyio\n"
- msg += " - pytest-asyncio\n"
- msg += " - pytest-tornasync\n"
- msg += " - pytest-trio\n"
- msg += " - pytest-twisted"
- warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid)))
- skip(reason="async def function and no async plugin installed (see warnings)")
-
-
-@hookimpl(trylast=True)
-def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
- testfunction = pyfuncitem.obj
- if is_async_function(testfunction):
- async_warn_and_skip(pyfuncitem.nodeid)
- funcargs = pyfuncitem.funcargs
- testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
- result = testfunction(**testargs)
- if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
- async_warn_and_skip(pyfuncitem.nodeid)
- return True
-
-
-def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]:
- if file_path.suffix == ".py":
- if not parent.session.isinitpath(file_path):
- if not path_matches_patterns(
- file_path, parent.config.getini("python_files") + ["__init__.py"]
- ):
- return None
- ihook = parent.session.gethookproxy(file_path)
- module: Module = ihook.pytest_pycollect_makemodule(
- module_path=file_path, parent=parent
- )
- return module
- return None
-
-
-def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool:
- """Return whether path matches any of the patterns in the list of globs given."""
- return any(fnmatch_ex(pattern, path) for pattern in patterns)
-
-
-def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module":
- if module_path.name == "__init__.py":
- pkg: Package = Package.from_parent(parent, path=module_path)
- return pkg
- mod: Module = Module.from_parent(parent, path=module_path)
- return mod
-
-
-@hookimpl(trylast=True)
-def pytest_pycollect_makeitem(
- collector: Union["Module", "Class"], name: str, obj: object
-) -> Union[None, nodes.Item, nodes.Collector, List[Union[nodes.Item, nodes.Collector]]]:
- assert isinstance(collector, (Class, Module)), type(collector)
- # Nothing was collected elsewhere, let's do it here.
- if safe_isclass(obj):
- if collector.istestclass(obj, name):
- klass: Class = Class.from_parent(collector, name=name, obj=obj)
- return klass
- elif collector.istestfunction(obj, name):
- # mock seems to store unbound methods (issue473), normalize it.
- obj = getattr(obj, "__func__", obj)
- # We need to try and unwrap the function if it's a functools.partial
- # or a functools.wrapped.
- # We mustn't if it's been wrapped with mock.patch (python 2 only).
- if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
- filename, lineno = getfslineno(obj)
- warnings.warn_explicit(
- message=PytestCollectionWarning(
- "cannot collect %r because it is not a function." % name
- ),
- category=None,
- filename=str(filename),
- lineno=lineno + 1,
- )
- elif getattr(obj, "__test__", True):
- if is_generator(obj):
- res: Function = Function.from_parent(collector, name=name)
- reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format(
- name=name
- )
- res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
- res.warn(PytestCollectionWarning(reason))
- return res
- else:
- return list(collector._genfunctions(name, obj))
- return None
-
-
-class PyobjMixin(nodes.Node):
-    """This mix-in inherits from Node to carry over the typing information.
-
-    As it's intended to always be mixed in before a node,
-    its position in the MRO is unaffected."""
-
- _ALLOW_MARKERS = True
-
- @property
- def module(self):
- """Python module object this node was collected from (can be None)."""
- node = self.getparent(Module)
- return node.obj if node is not None else None
-
- @property
- def cls(self):
- """Python class object this node was collected from (can be None)."""
- node = self.getparent(Class)
- return node.obj if node is not None else None
-
- @property
- def instance(self):
- """Python instance object the function is bound to.
-
- Returns None if not a test method, e.g. for a standalone test function,
- a staticmethod, a class or a module.
- """
- node = self.getparent(Function)
- return getattr(node.obj, "__self__", None) if node is not None else None
-
- @property
- def obj(self):
- """Underlying Python object."""
- obj = getattr(self, "_obj", None)
- if obj is None:
- self._obj = obj = self._getobj()
- # XXX evil hack
- # used to avoid Function marker duplication
- if self._ALLOW_MARKERS:
- self.own_markers.extend(get_unpacked_marks(self.obj))
- # This assumes that `obj` is called before there is a chance
- # to add custom keys to `self.keywords`, so no fear of overriding.
- self.keywords.update((mark.name, mark) for mark in self.own_markers)
- return obj
-
- @obj.setter
- def obj(self, value):
- self._obj = value
-
- def _getobj(self):
- """Get the underlying Python object. May be overwritten by subclasses."""
- # TODO: Improve the type of `parent` such that assert/ignore aren't needed.
- assert self.parent is not None
- obj = self.parent.obj # type: ignore[attr-defined]
- return getattr(obj, self.name)
-
- def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
- """Return Python path relative to the containing module."""
- chain = self.listchain()
- chain.reverse()
- parts = []
- for node in chain:
- name = node.name
- if isinstance(node, Module):
- name = os.path.splitext(name)[0]
- if stopatmodule:
- if includemodule:
- parts.append(name)
- break
- parts.append(name)
- parts.reverse()
- return ".".join(parts)
-
- def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
- # XXX caching?
- obj = self.obj
- compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
- if isinstance(compat_co_firstlineno, int):
- # nose compatibility
- file_path = sys.modules[obj.__module__].__file__
- assert file_path is not None
- if file_path.endswith(".pyc"):
- file_path = file_path[:-1]
- path: Union["os.PathLike[str]", str] = file_path
- lineno = compat_co_firstlineno
- else:
- path, lineno = getfslineno(obj)
- modpath = self.getmodpath()
- assert isinstance(lineno, int)
- return path, lineno, modpath
-
-
-# As an optimization, these builtin attribute names are pre-ignored when
-# iterating over an object during collection -- the pytest_pycollect_makeitem
-# hook is not called for them.
-# fmt: off
-class _EmptyClass: pass # noqa: E701
-IGNORED_ATTRIBUTES = frozenset.union( # noqa: E305
- frozenset(),
- # Module.
- dir(types.ModuleType("empty_module")),
- # Some extra module attributes the above doesn't catch.
- {"__builtins__", "__file__", "__cached__"},
- # Class.
- dir(_EmptyClass),
- # Instance.
- dir(_EmptyClass()),
-)
-del _EmptyClass
-# fmt: on
-
-
-class PyCollector(PyobjMixin, nodes.Collector):
- def funcnamefilter(self, name: str) -> bool:
- return self._matches_prefix_or_glob_option("python_functions", name)
-
- def isnosetest(self, obj: object) -> bool:
- """Look for the __test__ attribute, which is applied by the
- @nose.tools.istest decorator.
- """
- # We explicitly check for "is True" here to not mistakenly treat
- # classes with a custom __getattr__ returning something truthy (like a
- # function) as test classes.
- return safe_getattr(obj, "__test__", False) is True
-
- def classnamefilter(self, name: str) -> bool:
- return self._matches_prefix_or_glob_option("python_classes", name)
-
- def istestfunction(self, obj: object, name: str) -> bool:
- if self.funcnamefilter(name) or self.isnosetest(obj):
- if isinstance(obj, staticmethod):
- # staticmethods need to be unwrapped.
- obj = safe_getattr(obj, "__func__", False)
- return callable(obj) and fixtures.getfixturemarker(obj) is None
- else:
- return False
-
- def istestclass(self, obj: object, name: str) -> bool:
- return self.classnamefilter(name) or self.isnosetest(obj)
-
- def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
- """Check if the given name matches the prefix or glob-pattern defined
- in ini configuration."""
- for option in self.config.getini(option_name):
- if name.startswith(option):
- return True
- # Check that name looks like a glob-string before calling fnmatch
- # because this is called for every name in each collected module,
- # and fnmatch is somewhat expensive to call.
- elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
- name, option
- ):
- return True
- return False
-
- def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
- if not getattr(self.obj, "__test__", True):
- return []
-
- # Avoid random getattrs and peek in the __dict__ instead.
- dicts = [getattr(self.obj, "__dict__", {})]
- if isinstance(self.obj, type):
- for basecls in self.obj.__mro__:
- dicts.append(basecls.__dict__)
-
- # In each class, nodes should be definition ordered.
- # __dict__ is definition ordered.
- seen: Set[str] = set()
- dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = []
- ihook = self.ihook
- for dic in dicts:
- values: List[Union[nodes.Item, nodes.Collector]] = []
- # Note: seems like the dict can change during iteration -
- # be careful not to remove the list() without consideration.
- for name, obj in list(dic.items()):
- if name in IGNORED_ATTRIBUTES:
- continue
- if name in seen:
- continue
- seen.add(name)
- res = ihook.pytest_pycollect_makeitem(
- collector=self, name=name, obj=obj
- )
- if res is None:
- continue
- elif isinstance(res, list):
- values.extend(res)
- else:
- values.append(res)
- dict_values.append(values)
-
- # Between classes in the class hierarchy, reverse-MRO order -- nodes
- # inherited from base classes should come before subclasses.
- result = []
- for values in reversed(dict_values):
- result.extend(values)
- return result
-
- def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]:
- modulecol = self.getparent(Module)
- assert modulecol is not None
- module = modulecol.obj
- clscol = self.getparent(Class)
- cls = clscol and clscol.obj or None
-
- definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
- fixtureinfo = definition._fixtureinfo
-
- # pytest_generate_tests impls call metafunc.parametrize() which fills
- # metafunc._calls, the outcome of the hook.
- metafunc = Metafunc(
- definition=definition,
- fixtureinfo=fixtureinfo,
- config=self.config,
- cls=cls,
- module=module,
- _ispytest=True,
- )
- methods = []
- if hasattr(module, "pytest_generate_tests"):
- methods.append(module.pytest_generate_tests)
- if cls is not None and hasattr(cls, "pytest_generate_tests"):
- methods.append(cls().pytest_generate_tests)
- self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
-
- if not metafunc._calls:
- yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
- else:
- # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
- fm = self.session._fixturemanager
- fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
-
- # Add_funcarg_pseudo_fixture_def may have shadowed some fixtures
- # with direct parametrization, so make sure we update what the
- # function really needs.
- fixtureinfo.prune_dependency_tree()
-
- for callspec in metafunc._calls:
- subname = f"{name}[{callspec.id}]"
- yield Function.from_parent(
- self,
- name=subname,
- callspec=callspec,
- fixtureinfo=fixtureinfo,
- keywords={callspec.id: True},
- originalname=name,
- )
-
-
-class Module(nodes.File, PyCollector):
- """Collector for test classes and functions."""
-
- def _getobj(self):
- return self._importtestmodule()
-
- def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
- self._inject_setup_module_fixture()
- self._inject_setup_function_fixture()
- self.session._fixturemanager.parsefactories(self)
- return super().collect()
-
- def _inject_setup_module_fixture(self) -> None:
- """Inject a hidden autouse, module scoped fixture into the collected module object
- that invokes setUpModule/tearDownModule if either or both are available.
-
-        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
- other fixtures (#517).
- """
- has_nose = self.config.pluginmanager.has_plugin("nose")
- setup_module = _get_first_non_fixture_func(
- self.obj, ("setUpModule", "setup_module")
- )
- if setup_module is None and has_nose:
- # The name "setup" is too common - only treat as fixture if callable.
- setup_module = _get_first_non_fixture_func(self.obj, ("setup",))
- if not callable(setup_module):
- setup_module = None
- teardown_module = _get_first_non_fixture_func(
- self.obj, ("tearDownModule", "teardown_module")
- )
- if teardown_module is None and has_nose:
- teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",))
- # Same as "setup" above - only treat as fixture if callable.
- if not callable(teardown_module):
- teardown_module = None
-
- if setup_module is None and teardown_module is None:
- return
-
- @fixtures.fixture(
- autouse=True,
- scope="module",
- # Use a unique name to speed up lookup.
- name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
- )
- def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
- if setup_module is not None:
- _call_with_optional_argument(setup_module, request.module)
- yield
- if teardown_module is not None:
- _call_with_optional_argument(teardown_module, request.module)
-
- self.obj.__pytest_setup_module = xunit_setup_module_fixture
-
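# --- Editorial example (not part of the original module) --------------------
# Sketch of the module-level xunit-style hooks that the fixture injected
# above will call around a module's tests; the module contents are made up.
_events = []

def setup_module(module):
    _events.append("setup")

def teardown_module(module):
    _events.append("teardown")

def test_setup_module_ran():
    assert _events == ["setup"]
# -----------------------------------------------------------------------------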
- def _inject_setup_function_fixture(self) -> None:
- """Inject a hidden autouse, function scoped fixture into the collected module object
- that invokes setup_function/teardown_function if either or both are available.
-
-        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
- other fixtures (#517).
- """
- setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
- teardown_function = _get_first_non_fixture_func(
- self.obj, ("teardown_function",)
- )
- if setup_function is None and teardown_function is None:
- return
-
- @fixtures.fixture(
- autouse=True,
- scope="function",
- # Use a unique name to speed up lookup.
- name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
- )
- def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
- if request.instance is not None:
- # in this case we are bound to an instance, so we need to let
- # setup_method handle this
- yield
- return
- if setup_function is not None:
- _call_with_optional_argument(setup_function, request.function)
- yield
- if teardown_function is not None:
- _call_with_optional_argument(teardown_function, request.function)
-
- self.obj.__pytest_setup_function = xunit_setup_function_fixture
-
- def _importtestmodule(self):
- # We assume we are only called once per module.
- importmode = self.config.getoption("--import-mode")
- try:
- mod = import_path(self.path, mode=importmode, root=self.config.rootpath)
- except SyntaxError as e:
- raise self.CollectError(
- ExceptionInfo.from_current().getrepr(style="short")
- ) from e
- except ImportPathMismatchError as e:
- raise self.CollectError(
- "import file mismatch:\n"
- "imported module %r has this __file__ attribute:\n"
- " %s\n"
- "which is not the same as the test file we want to collect:\n"
- " %s\n"
- "HINT: remove __pycache__ / .pyc files and/or use a "
- "unique basename for your test file modules" % e.args
- ) from e
- except ImportError as e:
- exc_info = ExceptionInfo.from_current()
- if self.config.getoption("verbose") < 2:
- exc_info.traceback = exc_info.traceback.filter(filter_traceback)
- exc_repr = (
- exc_info.getrepr(style="short")
- if exc_info.traceback
- else exc_info.exconly()
- )
- formatted_tb = str(exc_repr)
- raise self.CollectError(
- "ImportError while importing test module '{path}'.\n"
- "Hint: make sure your test modules/packages have valid Python names.\n"
- "Traceback:\n"
- "{traceback}".format(path=self.path, traceback=formatted_tb)
- ) from e
- except skip.Exception as e:
- if e.allow_module_level:
- raise
- raise self.CollectError(
- "Using pytest.skip outside of a test will skip the entire module. "
- "If that's your intention, pass `allow_module_level=True`. "
- "If you want to skip a specific test or an entire class, "
- "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
- ) from e
- self.config.pluginmanager.consider_module(mod)
- return mod
-
-
-class Package(Module):
- def __init__(
- self,
- fspath: Optional[LEGACY_PATH],
- parent: nodes.Collector,
- # NOTE: following args are unused:
- config=None,
- session=None,
- nodeid=None,
-        path: Optional[Path] = None,
- ) -> None:
- # NOTE: Could be just the following, but kept as-is for compat.
- # nodes.FSCollector.__init__(self, fspath, parent=parent)
- session = parent.session
- nodes.FSCollector.__init__(
- self,
- fspath=fspath,
- path=path,
- parent=parent,
- config=config,
- session=session,
- nodeid=nodeid,
- )
- self.name = self.path.parent.name
-
- def setup(self) -> None:
- # Not using fixtures to call setup_module here because autouse fixtures
- # from packages are not called automatically (#4085).
- setup_module = _get_first_non_fixture_func(
- self.obj, ("setUpModule", "setup_module")
- )
- if setup_module is not None:
- _call_with_optional_argument(setup_module, self.obj)
-
- teardown_module = _get_first_non_fixture_func(
- self.obj, ("tearDownModule", "teardown_module")
- )
- if teardown_module is not None:
- func = partial(_call_with_optional_argument, teardown_module, self.obj)
- self.addfinalizer(func)
-
- def gethookproxy(self, fspath: "os.PathLike[str]"):
- warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
- return self.session.gethookproxy(fspath)
-
- def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
- warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
- return self.session.isinitpath(path)
-
- def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
- if direntry.name == "__pycache__":
- return False
- fspath = Path(direntry.path)
- ihook = self.session.gethookproxy(fspath.parent)
- if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
- return False
- norecursepatterns = self.config.getini("norecursedirs")
- if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns):
- return False
- return True
-
- def _collectfile(
- self, fspath: Path, handle_dupes: bool = True
- ) -> Sequence[nodes.Collector]:
- assert (
- fspath.is_file()
- ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
- fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
- )
- ihook = self.session.gethookproxy(fspath)
- if not self.session.isinitpath(fspath):
- if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
- return ()
-
- if handle_dupes:
- keepduplicates = self.config.getoption("keepduplicates")
- if not keepduplicates:
- duplicate_paths = self.config.pluginmanager._duplicatepaths
- if fspath in duplicate_paths:
- return ()
- else:
- duplicate_paths.add(fspath)
-
- return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return]
-
- def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
- this_path = self.path.parent
- init_module = this_path / "__init__.py"
- if init_module.is_file() and path_matches_patterns(
- init_module, self.config.getini("python_files")
- ):
- yield Module.from_parent(self, path=init_module)
- pkg_prefixes: Set[Path] = set()
- for direntry in visit(str(this_path), recurse=self._recurse):
- path = Path(direntry.path)
-
- # We will visit our own __init__.py file, in which case we skip it.
- if direntry.is_file():
- if direntry.name == "__init__.py" and path.parent == this_path:
- continue
-
- parts_ = parts(direntry.path)
- if any(
- str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path
- for pkg_prefix in pkg_prefixes
- ):
- continue
-
- if direntry.is_file():
- yield from self._collectfile(path)
- elif not direntry.is_dir():
- # Broken symlink or invalid/missing file.
- continue
- elif path.joinpath("__init__.py").is_file():
- pkg_prefixes.add(path)
-
-
-def _call_with_optional_argument(func, arg) -> None:
- """Call the given function with the given argument if func accepts one argument, otherwise
-    call func without arguments."""
- arg_count = func.__code__.co_argcount
- if inspect.ismethod(func):
- arg_count -= 1
- if arg_count:
- func(arg)
- else:
- func()
-
-
-def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]:
- """Return the attribute from the given object to be used as a setup/teardown
- xunit-style function, but only if not marked as a fixture to avoid calling it twice."""
- for name in names:
- meth: Optional[object] = getattr(obj, name, None)
- if meth is not None and fixtures.getfixturemarker(meth) is None:
- return meth
- return None
-
-
-class Class(PyCollector):
- """Collector for test methods."""
-
- @classmethod
- def from_parent(cls, parent, *, name, obj=None, **kw):
- """The public constructor."""
- return super().from_parent(name=name, parent=parent, **kw)
-
- def newinstance(self):
- return self.obj()
-
- def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
- if not safe_getattr(self.obj, "__test__", True):
- return []
- if hasinit(self.obj):
- assert self.parent is not None
- self.warn(
- PytestCollectionWarning(
- "cannot collect test class %r because it has a "
- "__init__ constructor (from: %s)"
- % (self.obj.__name__, self.parent.nodeid)
- )
- )
- return []
- elif hasnew(self.obj):
- assert self.parent is not None
- self.warn(
- PytestCollectionWarning(
- "cannot collect test class %r because it has a "
- "__new__ constructor (from: %s)"
- % (self.obj.__name__, self.parent.nodeid)
- )
- )
- return []
-
- self._inject_setup_class_fixture()
- self._inject_setup_method_fixture()
-
- self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
-
- return super().collect()
-
- def _inject_setup_class_fixture(self) -> None:
- """Inject a hidden autouse, class scoped fixture into the collected class object
- that invokes setup_class/teardown_class if either or both are available.
-
-        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
- other fixtures (#517).
- """
- setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
- teardown_class = getattr(self.obj, "teardown_class", None)
- if setup_class is None and teardown_class is None:
- return
-
- @fixtures.fixture(
- autouse=True,
- scope="class",
- # Use a unique name to speed up lookup.
- name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
- )
- def xunit_setup_class_fixture(cls) -> Generator[None, None, None]:
- if setup_class is not None:
- func = getimfunc(setup_class)
- _call_with_optional_argument(func, self.obj)
- yield
- if teardown_class is not None:
- func = getimfunc(teardown_class)
- _call_with_optional_argument(func, self.obj)
-
- self.obj.__pytest_setup_class = xunit_setup_class_fixture
-
- def _inject_setup_method_fixture(self) -> None:
- """Inject a hidden autouse, function scoped fixture into the collected class object
- that invokes setup_method/teardown_method if either or both are available.
-
-        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
- other fixtures (#517).
- """
- has_nose = self.config.pluginmanager.has_plugin("nose")
- setup_name = "setup_method"
- setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
- if setup_method is None and has_nose:
- setup_name = "setup"
- setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
- teardown_name = "teardown_method"
- teardown_method = getattr(self.obj, teardown_name, None)
- if teardown_method is None and has_nose:
- teardown_name = "teardown"
- teardown_method = getattr(self.obj, teardown_name, None)
- if setup_method is None and teardown_method is None:
- return
-
- @fixtures.fixture(
- autouse=True,
- scope="function",
- # Use a unique name to speed up lookup.
- name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
- )
- def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]:
- method = request.function
- if setup_method is not None:
- func = getattr(self, setup_name)
- _call_with_optional_argument(func, method)
- yield
- if teardown_method is not None:
- func = getattr(self, teardown_name)
- _call_with_optional_argument(func, method)
-
- self.obj.__pytest_setup_method = xunit_setup_method_fixture
-
-
-class InstanceDummy:
- """Instance used to be a node type between Class and Function. It has been
- removed in pytest 7.0. Some plugins exist which reference `pytest.Instance`
- only to ignore it; this dummy class keeps them working. This will be removed
- in pytest 8."""
-
-
-def __getattr__(name: str) -> object:
- if name == "Instance":
-        warnings.warn(INSTANCE_COLLECTOR, stacklevel=2)
- return InstanceDummy
- raise AttributeError(f"module {__name__} has no attribute {name}")
-
-
-def hasinit(obj: object) -> bool:
- init: object = getattr(obj, "__init__", None)
- if init:
- return init != object.__init__
- return False
-
-
-def hasnew(obj: object) -> bool:
- new: object = getattr(obj, "__new__", None)
- if new:
- return new != object.__new__
- return False
-
-
-@final
-@attr.s(frozen=True, auto_attribs=True, slots=True)
-class IdMaker:
- """Make IDs for a parametrization."""
-
- # The argnames of the parametrization.
- argnames: Sequence[str]
- # The ParameterSets of the parametrization.
- parametersets: Sequence[ParameterSet]
- # Optionally, a user-provided callable to make IDs for parameters in a
- # ParameterSet.
- idfn: Optional[Callable[[Any], Optional[object]]]
- # Optionally, explicit IDs for ParameterSets by index.
- ids: Optional[Sequence[Optional[object]]]
- # Optionally, the pytest config.
- # Used for controlling ASCII escaping, and for calling the
- # :hook:`pytest_make_parametrize_id` hook.
- config: Optional[Config]
- # Optionally, the ID of the node being parametrized.
- # Used only for clearer error messages.
- nodeid: Optional[str]
- # Optionally, the ID of the function being parametrized.
- # Used only for clearer error messages.
- func_name: Optional[str]
-
- def make_unique_parameterset_ids(self) -> List[str]:
- """Make a unique identifier for each ParameterSet, that may be used to
- identify the parametrization in a node ID.
-
- Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
- - user-provided id, if given
- - else an id derived from the value, applicable for certain types
- - else <argname><parameterset index>
- The counter suffix is appended only in case a string wouldn't be unique
- otherwise.
- """
- resolved_ids = list(self._limit_ids(self._resolve_ids(), limit=500))
- # All IDs must be unique!
- if len(resolved_ids) != len(set(resolved_ids)):
- # Record the number of occurrences of each ID.
- id_counts = Counter(resolved_ids)
- # Map the ID to its next suffix.
- id_suffixes: Dict[str, int] = defaultdict(int)
- # Suffix non-unique IDs to make them unique.
- for index, id in enumerate(resolved_ids):
- if id_counts[id] > 1:
- resolved_ids[index] = f"{id}{id_suffixes[id]}"
- id_suffixes[id] += 1
- return resolved_ids
-
- def _limit_ids(self, ids, limit=500):
- prefix_count = {}
- limit -= 6
- assert limit > 0
-
- for idval in ids:
- if len(idval) > limit:
- prefix = idval[:limit]
- idx = prefix_count.get(prefix, -1) + 1
- prefix_count[prefix] = idx
- idval = "{}-{}".format(prefix, idx)
- yield idval
-
- def _resolve_ids(self) -> Iterable[str]:
- """Resolve IDs for all ParameterSets (may contain duplicates)."""
- for idx, parameterset in enumerate(self.parametersets):
- if parameterset.id is not None:
- # ID provided directly - pytest.param(..., id="...")
- yield parameterset.id
- elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
- # ID provided in the IDs list - parametrize(..., ids=[...]).
- yield self._idval_from_value_required(self.ids[idx], idx)
- else:
- # ID not provided - generate it.
- yield "-".join(
- self._idval(val, argname, idx)
- for val, argname in zip(parameterset.values, self.argnames)
- )
-
- def _idval(self, val: object, argname: str, idx: int) -> str:
- """Make an ID for a parameter in a ParameterSet."""
- idval = self._idval_from_function(val, argname, idx)
- if idval is not None:
- return idval
- idval = self._idval_from_hook(val, argname)
- if idval is not None:
- return idval
- idval = self._idval_from_value(val)
- if idval is not None:
- return idval
- return self._idval_from_argname(argname, idx)
-
- def _idval_from_function(
- self, val: object, argname: str, idx: int
- ) -> Optional[str]:
- """Try to make an ID for a parameter in a ParameterSet using the
- user-provided id callable, if given."""
- if self.idfn is None:
- return None
- try:
- id = self.idfn(val)
- except Exception as e:
- prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
- msg = "error raised while trying to determine id of parameter '{}' at position {}"
- msg = prefix + msg.format(argname, idx)
- raise ValueError(msg) from e
- if id is None:
- return None
- return self._idval_from_value(id)
-
- def _idval_from_hook(self, val: object, argname: str) -> Optional[str]:
- """Try to make an ID for a parameter in a ParameterSet by calling the
- :hook:`pytest_make_parametrize_id` hook."""
- if self.config:
- id: Optional[str] = self.config.hook.pytest_make_parametrize_id(
- config=self.config, val=val, argname=argname
- )
- return id
- return None
-
- def _idval_from_value(self, val: object) -> Optional[str]:
- """Try to make an ID for a parameter in a ParameterSet from its value,
- if the value type is supported."""
- if isinstance(val, STRING_TYPES):
- return _ascii_escaped_by_config(val, self.config)
- elif val is None or isinstance(val, (float, int, bool, complex)):
- return str(val)
- elif isinstance(val, Pattern):
- return ascii_escaped(val.pattern)
- elif val is NOTSET:
- # Fallback to default. Note that NOTSET is an enum.Enum.
- pass
- elif isinstance(val, enum.Enum):
- return str(val)
- elif isinstance(getattr(val, "__name__", None), str):
- # Name of a class, function, module, etc.
- name: str = getattr(val, "__name__")
- return name
- return None
-
- def _idval_from_value_required(self, val: object, idx: int) -> str:
- """Like _idval_from_value(), but fails if the type is not supported."""
- id = self._idval_from_value(val)
- if id is not None:
- return id
-
- # Fail.
- if self.func_name is not None:
- prefix = f"In {self.func_name}: "
- elif self.nodeid is not None:
- prefix = f"In {self.nodeid}: "
- else:
- prefix = ""
- msg = (
- f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
- "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
- )
- fail(msg, pytrace=False)
-
- @staticmethod
- def _idval_from_argname(argname: str, idx: int) -> str:
- """Make an ID for a parameter in a ParameterSet from the argument name
- and the index of the ParameterSet."""
- return str(argname) + str(idx)
-
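
A minimal standalone sketch (helper name is mine, not from this module) of the suffixing rule that make_unique_parameterset_ids() describes: IDs that would collide get a per-ID counter appended, unique IDs are left untouched::

    from collections import Counter, defaultdict
    from typing import List

    def uniquify(ids: List[str]) -> List[str]:
        counts = Counter(ids)          # occurrences of each ID
        suffixes = defaultdict(int)    # next suffix per duplicated ID
        out = []
        for id_ in ids:
            if counts[id_] > 1:
                out.append(f"{id_}{suffixes[id_]}")
                suffixes[id_] += 1
            else:
                out.append(id_)
        return out

    assert uniquify(["a", "b", "a"]) == ["a0", "b", "a1"]
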
-
-@final
-@attr.s(frozen=True, slots=True, auto_attribs=True)
-class CallSpec2:
- """A planned parameterized invocation of a test function.
-
- Calculated during collection for a given test function's Metafunc.
- Once collection is over, each callspec is turned into a single Item
- and stored in item.callspec.
- """
-
- # arg name -> arg value which will be passed to the parametrized test
- # function (direct parameterization).
- funcargs: Dict[str, object] = attr.Factory(dict)
- # arg name -> arg value which will be passed to a fixture of the same name
- # (indirect parametrization).
- params: Dict[str, object] = attr.Factory(dict)
- # arg name -> arg index.
- indices: Dict[str, int] = attr.Factory(dict)
- # Used for sorting parametrized resources.
- _arg2scope: Dict[str, Scope] = attr.Factory(dict)
- # Parts which will be added to the item's name in `[..]` separated by "-".
- _idlist: List[str] = attr.Factory(list)
- # Marks which will be applied to the item.
- marks: List[Mark] = attr.Factory(list)
-
- def setmulti(
- self,
- *,
- valtypes: Mapping[str, "Literal['params', 'funcargs']"],
- argnames: Iterable[str],
- valset: Iterable[object],
- id: str,
- marks: Iterable[Union[Mark, MarkDecorator]],
- scope: Scope,
- param_index: int,
- ) -> "CallSpec2":
- funcargs = self.funcargs.copy()
- params = self.params.copy()
- indices = self.indices.copy()
- arg2scope = self._arg2scope.copy()
- for arg, val in zip(argnames, valset):
- if arg in params or arg in funcargs:
- raise ValueError(f"duplicate {arg!r}")
- valtype_for_arg = valtypes[arg]
- if valtype_for_arg == "params":
- params[arg] = val
- elif valtype_for_arg == "funcargs":
- funcargs[arg] = val
- else:
- assert_never(valtype_for_arg)
- indices[arg] = param_index
- arg2scope[arg] = scope
- return CallSpec2(
- funcargs=funcargs,
- params=params,
- arg2scope=arg2scope,
- indices=indices,
- idlist=[*self._idlist, id],
- marks=[*self.marks, *normalize_mark_list(marks)],
- )
-
- def getparam(self, name: str) -> object:
- try:
- return self.params[name]
- except KeyError as e:
- raise ValueError(name) from e
-
- @property
- def id(self) -> str:
- return "-".join(self._idlist)
-
-
-@final
-class Metafunc:
- """Objects passed to the :hook:`pytest_generate_tests` hook.
-
- They help to inspect a test function and to generate tests according to
- test configuration or values specified in the class or module where a
- test function is defined.
- """
-
- def __init__(
- self,
- definition: "FunctionDefinition",
- fixtureinfo: fixtures.FuncFixtureInfo,
- config: Config,
- cls=None,
- module=None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
-
- #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
- self.definition = definition
-
- #: Access to the :class:`pytest.Config` object for the test session.
- self.config = config
-
- #: The module object where the test function is defined in.
- self.module = module
-
- #: Underlying Python test function.
- self.function = definition.obj
-
- #: Set of fixture names required by the test function.
- self.fixturenames = fixtureinfo.names_closure
-
- #: Class object where the test function is defined in or ``None``.
- self.cls = cls
-
- self._arg2fixturedefs = fixtureinfo.name2fixturedefs
-
- # Result of parametrize().
- self._calls: List[CallSpec2] = []
-
- def parametrize(
- self,
- argnames: Union[str, List[str], Tuple[str, ...]],
- argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
- indirect: Union[bool, Sequence[str]] = False,
- ids: Optional[
- Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
- ] = None,
- scope: "Optional[_ScopeName]" = None,
- *,
- _param_mark: Optional[Mark] = None,
- ) -> None:
- """Add new invocations to the underlying test function using the list
- of argvalues for the given argnames. Parametrization is performed
- during the collection phase. If you need to setup expensive resources
- see about setting indirect to do it rather than at test setup time.
-
- Can be called multiple times, in which case each call parametrizes all
- previous parametrizations, e.g.
-
- ::
-
- unparametrized: t
- parametrize ["x", "y"]: t[x], t[y]
- parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2]
-
- :param argnames:
- A comma-separated string denoting one or more argument names, or
- a list/tuple of argument strings.
-
- :param argvalues:
- The list of argvalues determines how often a test is invoked with
- different argument values.
-
- If only one argname was specified argvalues is a list of values.
- If N argnames were specified, argvalues must be a list of
- N-tuples, where each tuple-element specifies a value for its
- respective argname.
-
- :param indirect:
- A list of arguments' names (subset of argnames) or a boolean.
- If True, the list contains all names from the argnames. Each
- argvalue corresponding to an argname in this list will
- be passed as request.param to its respective argname fixture
- function so that it can perform more expensive setups during the
- setup phase of a test rather than at collection time.
-
- :param ids:
- Sequence of (or generator for) ids for ``argvalues``,
- or a callable to return part of the id for each argvalue.
-
- With sequences (and generators like ``itertools.count()``) the
- returned ids should be of type ``string``, ``int``, ``float``,
- ``bool``, or ``None``.
- They are mapped to the corresponding index in ``argvalues``.
- ``None`` means to use the auto-generated id.
-
- If it is a callable it will be called for each entry in
- ``argvalues``, and the return value is used as part of the
- auto-generated id for the whole set (where parts are joined with
- dashes ("-")).
- This is useful to provide more specific ids for certain items, e.g.
- dates. Returning ``None`` will use an auto-generated id.
-
- If no ids are provided they will be generated automatically from
- the argvalues.
-
- :param scope:
- If specified it denotes the scope of the parameters.
- The scope is used for grouping tests by parameter instances.
- It will also override any fixture-function defined scope, allowing
- a dynamic scope to be set using the test context or configuration.
- """
- argnames, parametersets = ParameterSet._for_parametrize(
- argnames,
- argvalues,
- self.function,
- self.config,
- nodeid=self.definition.nodeid,
- )
- del argvalues
-
- if "request" in argnames:
- fail(
- "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
- pytrace=False,
- )
-
- if scope is not None:
- scope_ = Scope.from_user(
- scope, descr=f"parametrize() call in {self.function.__name__}"
- )
- else:
- scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
-
- self._validate_if_using_arg_names(argnames, indirect)
-
- arg_values_types = self._resolve_arg_value_types(argnames, indirect)
-
- # Use any already (possibly) generated ids with parametrize Marks.
- if _param_mark and _param_mark._param_ids_from:
- generated_ids = _param_mark._param_ids_from._param_ids_generated
- if generated_ids is not None:
- ids = generated_ids
-
- ids = self._resolve_parameter_set_ids(
- argnames, ids, parametersets, nodeid=self.definition.nodeid
- )
-
- # Store used (possibly generated) ids with parametrize Marks.
- if _param_mark and _param_mark._param_ids_from and generated_ids is None:
- object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
-
- # Create the new calls: if parametrize() is applied multiple times (i.e. the decorator
- # is stacked more than once) then we accumulate those calls, generating the cartesian
- # product of all calls.
- newcalls = []
- for callspec in self._calls or [CallSpec2()]:
- for param_index, (param_id, param_set) in enumerate(
- zip(ids, parametersets)
- ):
- newcallspec = callspec.setmulti(
- valtypes=arg_values_types,
- argnames=argnames,
- valset=param_set.values,
- id=param_id,
- marks=param_set.marks,
- scope=scope_,
- param_index=param_index,
- )
- newcalls.append(newcallspec)
- self._calls = newcalls
-
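
A hedged usage example of the stacking behaviour described in the parametrize() docstring above (ordinary pytest usage, not part of this module): applying the marker twice accumulates the cartesian product of calls::

    import pytest

    @pytest.mark.parametrize("n", [1, 2])
    @pytest.mark.parametrize("letter", ["x", "y"])
    def test_combo(letter, n):
        # collects as test_combo[x-1], test_combo[x-2], test_combo[y-1], test_combo[y-2]
        assert letter in ("x", "y") and n in (1, 2)
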
- def _resolve_parameter_set_ids(
- self,
- argnames: Sequence[str],
- ids: Optional[
- Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
- ],
- parametersets: Sequence[ParameterSet],
- nodeid: str,
- ) -> List[str]:
- """Resolve the actual ids for the given parameter sets.
-
- :param argnames:
- Argument names passed to ``parametrize()``.
- :param ids:
- The `ids` parameter of the ``parametrize()`` call (see docs).
- :param parametersets:
- The parameter sets, each containing a set of values corresponding
- to ``argnames``.
- :param str nodeid:
- The nodeid of the definition item that generated this
- parametrization.
- :returns:
- List with ids for each parameter set given.
- """
- if ids is None:
- idfn = None
- ids_ = None
- elif callable(ids):
- idfn = ids
- ids_ = None
- else:
- idfn = None
- ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
- id_maker = IdMaker(
- argnames,
- parametersets,
- idfn,
- ids_,
- self.config,
- nodeid=nodeid,
- func_name=self.function.__name__,
- )
- return id_maker.make_unique_parameterset_ids()
-
- def _validate_ids(
- self,
- ids: Iterable[Optional[object]],
- parametersets: Sequence[ParameterSet],
- func_name: str,
- ) -> List[Optional[object]]:
- try:
- num_ids = len(ids) # type: ignore[arg-type]
- except TypeError:
- try:
- iter(ids)
- except TypeError as e:
- raise TypeError("ids must be a callable or an iterable") from e
- num_ids = len(parametersets)
-
- # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
- if num_ids != len(parametersets) and num_ids != 0:
- msg = "In {}: {} parameter sets specified, with different number of ids: {}"
- fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)
-
- return list(itertools.islice(ids, num_ids))
-
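
For reference, a hedged example of the explicit-ids form that _validate_ids() guards (ordinary pytest usage; names are mine): the ids sequence must provide one entry per parameter set::

    import pytest

    @pytest.mark.parametrize("x", [1, 2, 3], ids=["one", "two", "three"])
    def test_named(x):
        # collects as test_named[one], test_named[two], test_named[three]
        assert x in (1, 2, 3)
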
- def _resolve_arg_value_types(
- self,
- argnames: Sequence[str],
- indirect: Union[bool, Sequence[str]],
- ) -> Dict[str, "Literal['params', 'funcargs']"]:
- """Resolve if each parametrized argument must be considered a
- parameter to a fixture or a "funcarg" to the function, based on the
- ``indirect`` parameter of the parametrize() call.
-
- :param List[str] argnames: List of argument names passed to ``parametrize()``.
- :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
- :rtype: Dict[str, str]
- A dict mapping each arg name to either:
- * "params" if the argname should be the parameter of a fixture of the same name.
- * "funcargs" if the argname should be a parameter to the parametrized test function.
- """
- if isinstance(indirect, bool):
- valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys(
- argnames, "params" if indirect else "funcargs"
- )
- elif isinstance(indirect, Sequence):
- valtypes = dict.fromkeys(argnames, "funcargs")
- for arg in indirect:
- if arg not in argnames:
- fail(
- "In {}: indirect fixture '{}' doesn't exist".format(
- self.function.__name__, arg
- ),
- pytrace=False,
- )
- valtypes[arg] = "params"
- else:
- fail(
- "In {func}: expected Sequence or boolean for indirect, got {type}".format(
- type=type(indirect).__name__, func=self.function.__name__
- ),
- pytrace=False,
- )
- return valtypes
-
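
The params/funcargs split above is what makes ``indirect`` work. A hedged usage sketch (standard pytest API; fixture and test names are mine): with ``indirect=True`` the parametrized value is routed to the fixture of the same name as ``request.param`` instead of being passed straight to the test function::

    import pytest

    @pytest.fixture
    def data(request):
        # expensive setup can happen here, at test setup time
        return request.param * 2

    @pytest.mark.parametrize("data", [1, 2], indirect=True)
    def test_indirect(data):
        assert data in (2, 4)
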
- def _validate_if_using_arg_names(
- self,
- argnames: Sequence[str],
- indirect: Union[bool, Sequence[str]],
- ) -> None:
- """Check if all argnames are being used, by default values, or directly/indirectly.
-
- :param List[str] argnames: List of argument names passed to ``parametrize()``.
- :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
- :raises ValueError: If validation fails.
- """
- default_arg_names = set(get_default_arg_names(self.function))
- func_name = self.function.__name__
- for arg in argnames:
- if arg not in self.fixturenames:
- if arg in default_arg_names:
- fail(
- "In {}: function already takes an argument '{}' with a default value".format(
- func_name, arg
- ),
- pytrace=False,
- )
- else:
- if isinstance(indirect, Sequence):
- name = "fixture" if arg in indirect else "argument"
- else:
- name = "fixture" if indirect else "argument"
- fail(
- f"In {func_name}: function uses no {name} '{arg}'",
- pytrace=False,
- )
-
-
-def _find_parametrized_scope(
- argnames: Sequence[str],
- arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
- indirect: Union[bool, Sequence[str]],
-) -> Scope:
- """Find the most appropriate scope for a parametrized call based on its arguments.
-
- When there's at least one direct argument, always use "function" scope.
-
- When a test function is parametrized and all its arguments are indirect
- (e.g. fixtures), return the most narrow scope based on the fixtures used.
-
- Related to issue #1832, based on code posted by @Kingdread.
- """
- if isinstance(indirect, Sequence):
- all_arguments_are_fixtures = len(indirect) == len(argnames)
- else:
- all_arguments_are_fixtures = bool(indirect)
-
- if all_arguments_are_fixtures:
- fixturedefs = arg2fixturedefs or {}
- used_scopes = [
- fixturedef[0]._scope
- for name, fixturedef in fixturedefs.items()
- if name in argnames
- ]
- # Takes the most narrow scope from used fixtures.
- return min(used_scopes, default=Scope.Function)
-
- return Scope.Function
-
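
A hedged illustration of the rule above (fixture and test names are mine): when every parametrized argument is indirect and the backing fixture has a broader scope, the parametrization inherits that scope, so pytest groups tests by parameter value and the fixture is set up once per value rather than once per test::

    import pytest

    @pytest.fixture(scope="module")
    def db(request):
        # created once per distinct parameter value within the module
        return {"dsn": request.param}

    @pytest.mark.parametrize("db", ["sqlite://", "postgres://"], indirect=True)
    def test_connects(db):
        assert "://" in db["dsn"]

    @pytest.mark.parametrize("db", ["sqlite://", "postgres://"], indirect=True)
    def test_reports_dsn(db):
        assert db["dsn"].endswith("://")
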
-
-def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str:
- if config is None:
- escape_option = False
- else:
- escape_option = config.getini(
- "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
- )
- # TODO: If escaping is turned off and the user passes bytes,
- # will return a bytes. For now we ignore this but the
- # code *probably* doesn't handle this case.
- return val if escape_option else ascii_escaped(val) # type: ignore
-
-
-def _pretty_fixture_path(func) -> str:
- cwd = Path.cwd()
- loc = Path(getlocation(func, str(cwd)))
- prefix = Path("...", "_pytest")
- try:
- return str(prefix / loc.relative_to(_PYTEST_DIR))
- except ValueError:
- return bestrelpath(cwd, loc)
-
-
-def show_fixtures_per_test(config):
- from _pytest.main import wrap_session
-
- return wrap_session(config, _show_fixtures_per_test)
-
-
-def _show_fixtures_per_test(config: Config, session: Session) -> None:
- import _pytest.config
-
- session.perform_collect()
- curdir = Path.cwd()
- tw = _pytest.config.create_terminal_writer(config)
- verbose = config.getvalue("verbose")
-
- def get_best_relpath(func) -> str:
- loc = getlocation(func, str(curdir))
- return bestrelpath(curdir, Path(loc))
-
- def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None:
- argname = fixture_def.argname
- if verbose <= 0 and argname.startswith("_"):
- return
- prettypath = _pretty_fixture_path(fixture_def.func)
- tw.write(f"{argname}", green=True)
- tw.write(f" -- {prettypath}", yellow=True)
- tw.write("\n")
- fixture_doc = inspect.getdoc(fixture_def.func)
- if fixture_doc:
- write_docstring(
- tw, fixture_doc.split("\n\n")[0] if verbose <= 0 else fixture_doc
- )
- else:
- tw.line(" no docstring available", red=True)
-
- def write_item(item: nodes.Item) -> None:
- # Not all items have _fixtureinfo attribute.
- info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None)
- if info is None or not info.name2fixturedefs:
- # This test item does not use any fixtures.
- return
- tw.line()
- tw.sep("-", f"fixtures used by {item.name}")
- # TODO: Fix this type ignore.
- tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined]
- # dict key not used in loop but needed for sorting.
- for _, fixturedefs in sorted(info.name2fixturedefs.items()):
- assert fixturedefs is not None
- if not fixturedefs:
- continue
- # Last item is expected to be the one used by the test item.
- write_fixture(fixturedefs[-1])
-
- for session_item in session.items:
- write_item(session_item)
-
-
-def showfixtures(config: Config) -> Union[int, ExitCode]:
- from _pytest.main import wrap_session
-
- return wrap_session(config, _showfixtures_main)
-
-
-def _showfixtures_main(config: Config, session: Session) -> None:
- import _pytest.config
-
- session.perform_collect()
- curdir = Path.cwd()
- tw = _pytest.config.create_terminal_writer(config)
- verbose = config.getvalue("verbose")
-
- fm = session._fixturemanager
-
- available = []
- seen: Set[Tuple[str, str]] = set()
-
- for argname, fixturedefs in fm._arg2fixturedefs.items():
- assert fixturedefs is not None
- if not fixturedefs:
- continue
- for fixturedef in fixturedefs:
- loc = getlocation(fixturedef.func, str(curdir))
- if (fixturedef.argname, loc) in seen:
- continue
- seen.add((fixturedef.argname, loc))
- available.append(
- (
- len(fixturedef.baseid),
- fixturedef.func.__module__,
- _pretty_fixture_path(fixturedef.func),
- fixturedef.argname,
- fixturedef,
- )
- )
-
- available.sort()
- currentmodule = None
- for baseid, module, prettypath, argname, fixturedef in available:
- if currentmodule != module:
- if not module.startswith("_pytest."):
- tw.line()
- tw.sep("-", f"fixtures defined from {module}")
- currentmodule = module
- if verbose <= 0 and argname.startswith("_"):
- continue
- tw.write(f"{argname}", green=True)
- if fixturedef.scope != "function":
- tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
- tw.write(f" -- {prettypath}", yellow=True)
- tw.write("\n")
- doc = inspect.getdoc(fixturedef.func)
- if doc:
- write_docstring(tw, doc.split("\n\n")[0] if verbose <= 0 else doc)
- else:
- tw.line(" no docstring available", red=True)
- tw.line()
-
-
-def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None:
- for line in doc.split("\n"):
- tw.line(indent + line)
-
-
-class Function(PyobjMixin, nodes.Item):
- """An Item responsible for setting up and executing a Python test function.
-
- :param name:
- The full function name, including any decorations like those
- added by parametrization (``my_func[my_param]``).
- :param parent:
- The parent Node.
- :param config:
- The pytest Config object.
- :param callspec:
- If given, this function has been parametrized and the callspec contains
- meta information about the parametrization.
- :param callobj:
- If given, the object which will be called when the Function is invoked,
- otherwise the callobj will be obtained from ``parent`` using ``originalname``.
- :param keywords:
- Keywords bound to the function object for "-k" matching.
- :param session:
- The pytest Session object.
- :param fixtureinfo:
- Fixture information already resolved at this fixture node.
- :param originalname:
- The attribute name to use for accessing the underlying function object.
- Defaults to ``name``. Set this if name is different from the original name,
- for example when it contains decorations like those added by parametrization
- (``my_func[my_param]``).
- """
-
- # Disable since functions handle it themselves.
- _ALLOW_MARKERS = False
-
- def __init__(
- self,
- name: str,
- parent,
- config: Optional[Config] = None,
- callspec: Optional[CallSpec2] = None,
- callobj=NOTSET,
- keywords: Optional[Mapping[str, Any]] = None,
- session: Optional[Session] = None,
- fixtureinfo: Optional[FuncFixtureInfo] = None,
- originalname: Optional[str] = None,
- ) -> None:
- super().__init__(name, parent, config=config, session=session)
-
- if callobj is not NOTSET:
- self.obj = callobj
-
- #: Original function name, without any decorations (for example
- #: parametrization adds a ``"[...]"`` suffix to function names), used to access
- #: the underlying function object from ``parent`` (in case ``callobj`` is not given
- #: explicitly).
- #:
- #: .. versionadded:: 3.0
- self.originalname = originalname or name
-
- # Note: when FunctionDefinition is introduced, we should change ``originalname``
- # to a readonly property that returns FunctionDefinition.name.
-
- self.own_markers.extend(get_unpacked_marks(self.obj))
- if callspec:
- self.callspec = callspec
- self.own_markers.extend(callspec.marks)
-
- # todo: this is a hell of a hack
- # https://github.com/pytest-dev/pytest/issues/4569
- # Note: the order of the updates is important here; indicates what
- # takes priority (ctor argument over function attributes over markers).
- # Take own_markers only; NodeKeywords handles parent traversal on its own.
- self.keywords.update((mark.name, mark) for mark in self.own_markers)
- self.keywords.update(self.obj.__dict__)
- if keywords:
- self.keywords.update(keywords)
-
- if fixtureinfo is None:
- fixtureinfo = self.session._fixturemanager.getfixtureinfo(
- self, self.obj, self.cls, funcargs=True
- )
- self._fixtureinfo: FuncFixtureInfo = fixtureinfo
- self.fixturenames = fixtureinfo.names_closure
- self._initrequest()
-
- @classmethod
- def from_parent(cls, parent, **kw): # todo: determine sound type limitations
- """The public constructor."""
- return super().from_parent(parent=parent, **kw)
-
- def _initrequest(self) -> None:
- self.funcargs: Dict[str, object] = {}
- self._request = fixtures.FixtureRequest(self, _ispytest=True)
-
- @property
- def function(self):
- """Underlying python 'function' object."""
- return getimfunc(self.obj)
-
- def _getobj(self):
- assert self.parent is not None
- if isinstance(self.parent, Class):
- # Each Function gets a fresh class instance.
- parent_obj = self.parent.newinstance()
- else:
- parent_obj = self.parent.obj # type: ignore[attr-defined]
- return getattr(parent_obj, self.originalname)
-
- @property
- def _pyfuncitem(self):
- """(compatonly) for code expecting pytest-2.2 style request objects."""
- return self
-
- def runtest(self) -> None:
- """Execute the underlying test function."""
- self.ihook.pytest_pyfunc_call(pyfuncitem=self)
-
- def setup(self) -> None:
- self._request._fillfixtures()
-
- def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None:
- if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
- code = _pytest._code.Code.from_function(get_real_func(self.obj))
- path, firstlineno = code.path, code.firstlineno
- traceback = excinfo.traceback
- ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
- if ntraceback == traceback:
- ntraceback = ntraceback.cut(path=path)
- if ntraceback == traceback:
- ntraceback = ntraceback.filter(filter_traceback)
- if not ntraceback:
- ntraceback = traceback
-
- excinfo.traceback = ntraceback.filter()
- # issue364: mark all but first and last frames to
- # only show a single-line message for each frame.
- if self.config.getoption("tbstyle", "auto") == "auto":
- if len(excinfo.traceback) > 2:
- for entry in excinfo.traceback[1:-1]:
- entry.set_repr_style("short")
-
- # TODO: Type ignored -- breaks Liskov Substitution.
- def repr_failure( # type: ignore[override]
- self,
- excinfo: ExceptionInfo[BaseException],
- ) -> Union[str, TerminalRepr]:
- style = self.config.getoption("tbstyle", "auto")
- if style == "auto":
- style = "long"
- return self._repr_failure_py(excinfo, style=style)
-
-
-class FunctionDefinition(Function):
- """
- This class is a stopgap solution until we evolve to have actual function definition nodes
- and manage to get rid of ``metafunc``.
- """
-
- def runtest(self) -> None:
- raise RuntimeError("function definitions are not supposed to be run as tests")
-
- setup = runtest
diff --git a/contrib/python/pytest/py3/_pytest/python_api.py b/contrib/python/pytest/py3/_pytest/python_api.py
deleted file mode 100644
index 5fa2196192..0000000000
--- a/contrib/python/pytest/py3/_pytest/python_api.py
+++ /dev/null
@@ -1,975 +0,0 @@
-import math
-import pprint
-from collections.abc import Collection
-from collections.abc import Sized
-from decimal import Decimal
-from numbers import Complex
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Generic
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import overload
-from typing import Pattern
-from typing import Sequence
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-if TYPE_CHECKING:
- from numpy import ndarray
-
-
-import _pytest._code
-from _pytest.compat import final
-from _pytest.compat import STRING_TYPES
-from _pytest.outcomes import fail
-
-
-def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
- at_str = f" at {at}" if at else ""
- return TypeError(
- "cannot make approximate comparisons to non-numeric values: {!r} {}".format(
- value, at_str
- )
- )
-
-
-def _compare_approx(
- full_object: object,
- message_data: Sequence[Tuple[str, str, str]],
- number_of_elements: int,
- different_ids: Sequence[object],
- max_abs_diff: float,
- max_rel_diff: float,
-) -> List[str]:
- message_list = list(message_data)
- message_list.insert(0, ("Index", "Obtained", "Expected"))
- max_sizes = [0, 0, 0]
- for index, obtained, expected in message_list:
- max_sizes[0] = max(max_sizes[0], len(index))
- max_sizes[1] = max(max_sizes[1], len(obtained))
- max_sizes[2] = max(max_sizes[2], len(expected))
- explanation = [
- f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:",
- f"Max absolute difference: {max_abs_diff}",
- f"Max relative difference: {max_rel_diff}",
- ] + [
- f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}"
- for indexes, obtained, expected in message_list
- ]
- return explanation
-
-
-# builtin pytest.approx helper
-
-
-class ApproxBase:
- """Provide shared utilities for making approximate comparisons between
- numbers or sequences of numbers."""
-
- # Tell numpy to use our `__eq__` operator instead of its own.
- __array_ufunc__ = None
- __array_priority__ = 100
-
- def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
- __tracebackhide__ = True
- self.expected = expected
- self.abs = abs
- self.rel = rel
- self.nan_ok = nan_ok
- self._check_type()
-
- def __repr__(self) -> str:
- raise NotImplementedError
-
- def _repr_compare(self, other_side: Any) -> List[str]:
- return [
- "comparison failed",
- f"Obtained: {other_side}",
- f"Expected: {self}",
- ]
-
- def __eq__(self, actual) -> bool:
- return all(
- a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
- )
-
- def __bool__(self):
- __tracebackhide__ = True
- raise AssertionError(
- "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?"
- )
-
- # Ignore type because of https://github.com/python/mypy/issues/4266.
- __hash__ = None # type: ignore
-
- def __ne__(self, actual) -> bool:
- return not (actual == self)
-
- def _approx_scalar(self, x) -> "ApproxScalar":
- if isinstance(x, Decimal):
- return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
- return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
-
- def _yield_comparisons(self, actual):
- """Yield all the pairs of numbers to be compared.
-
- This is used to implement the `__eq__` method.
- """
- raise NotImplementedError
-
- def _check_type(self) -> None:
- """Raise a TypeError if the expected value is not a valid type."""
- # This is only a concern if the expected value is a sequence. In every
- # other case, the approx() function ensures that the expected value has
- # a numeric type. For this reason, the default is to do nothing. The
- # classes that deal with sequences should reimplement this method to
- # raise if there are any non-numeric elements in the sequence.
-
-
-def _recursive_list_map(f, x):
- if isinstance(x, list):
- return [_recursive_list_map(f, xi) for xi in x]
- else:
- return f(x)
-
-
-class ApproxNumpy(ApproxBase):
- """Perform approximate comparisons where the expected value is numpy array."""
-
- def __repr__(self) -> str:
- list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
- return f"approx({list_scalars!r})"
-
- def _repr_compare(self, other_side: "ndarray") -> List[str]:
- import itertools
- import math
-
- def get_value_from_nested_list(
- nested_list: List[Any], nd_index: Tuple[Any, ...]
- ) -> Any:
- """
- Helper function to get the value out of a nested list, given an n-dimensional index.
- This mimics numpy's indexing, but for raw nested python lists.
- """
- value: Any = nested_list
- for i in nd_index:
- value = value[i]
- return value
-
- np_array_shape = self.expected.shape
- approx_side_as_list = _recursive_list_map(
- self._approx_scalar, self.expected.tolist()
- )
-
- if np_array_shape != other_side.shape:
- return [
- "Impossible to compare arrays with different shapes.",
- f"Shapes: {np_array_shape} and {other_side.shape}",
- ]
-
- number_of_elements = self.expected.size
- max_abs_diff = -math.inf
- max_rel_diff = -math.inf
- different_ids = []
- for index in itertools.product(*(range(i) for i in np_array_shape)):
- approx_value = get_value_from_nested_list(approx_side_as_list, index)
- other_value = get_value_from_nested_list(other_side, index)
- if approx_value != other_value:
- abs_diff = abs(approx_value.expected - other_value)
- max_abs_diff = max(max_abs_diff, abs_diff)
- if other_value == 0.0:
- max_rel_diff = math.inf
- else:
- max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
- different_ids.append(index)
-
- message_data = [
- (
- str(index),
- str(get_value_from_nested_list(other_side, index)),
- str(get_value_from_nested_list(approx_side_as_list, index)),
- )
- for index in different_ids
- ]
- return _compare_approx(
- self.expected,
- message_data,
- number_of_elements,
- different_ids,
- max_abs_diff,
- max_rel_diff,
- )
-
- def __eq__(self, actual) -> bool:
- import numpy as np
-
- # self.expected is supposed to always be an array here.
-
- if not np.isscalar(actual):
- try:
- actual = np.asarray(actual)
- except Exception as e:
- raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
-
- if not np.isscalar(actual) and actual.shape != self.expected.shape:
- return False
-
- return super().__eq__(actual)
-
- def _yield_comparisons(self, actual):
- import numpy as np
-
- # `actual` can either be a numpy array or a scalar, it is treated in
- # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
- # only method that calls this one.
-
- if np.isscalar(actual):
- for i in np.ndindex(self.expected.shape):
- yield actual, self.expected[i].item()
- else:
- for i in np.ndindex(self.expected.shape):
- yield actual[i].item(), self.expected[i].item()
-
-
-class ApproxMapping(ApproxBase):
- """Perform approximate comparisons where the expected value is a mapping
- with numeric values (the keys can be anything)."""
-
- def __repr__(self) -> str:
- return "approx({!r})".format(
- {k: self._approx_scalar(v) for k, v in self.expected.items()}
- )
-
- def _repr_compare(self, other_side: Mapping[object, float]) -> List[str]:
- import math
-
- approx_side_as_map = {
- k: self._approx_scalar(v) for k, v in self.expected.items()
- }
-
- number_of_elements = len(approx_side_as_map)
- max_abs_diff = -math.inf
- max_rel_diff = -math.inf
- different_ids = []
- for (approx_key, approx_value), other_value in zip(
- approx_side_as_map.items(), other_side.values()
- ):
- if approx_value != other_value:
- max_abs_diff = max(
- max_abs_diff, abs(approx_value.expected - other_value)
- )
- max_rel_diff = max(
- max_rel_diff,
- abs((approx_value.expected - other_value) / approx_value.expected),
- )
- different_ids.append(approx_key)
-
- message_data = [
- (str(key), str(other_side[key]), str(approx_side_as_map[key]))
- for key in different_ids
- ]
-
- return _compare_approx(
- self.expected,
- message_data,
- number_of_elements,
- different_ids,
- max_abs_diff,
- max_rel_diff,
- )
-
- def __eq__(self, actual) -> bool:
- try:
- if set(actual.keys()) != set(self.expected.keys()):
- return False
- except AttributeError:
- return False
-
- return super().__eq__(actual)
-
- def _yield_comparisons(self, actual):
- for k in self.expected.keys():
- yield actual[k], self.expected[k]
-
- def _check_type(self) -> None:
- __tracebackhide__ = True
- for key, value in self.expected.items():
- if isinstance(value, type(self.expected)):
- msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
- raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
-
-
-class ApproxSequenceLike(ApproxBase):
- """Perform approximate comparisons where the expected value is a sequence of numbers."""
-
- def __repr__(self) -> str:
- seq_type = type(self.expected)
- if seq_type not in (tuple, list):
- seq_type = list
- return "approx({!r})".format(
- seq_type(self._approx_scalar(x) for x in self.expected)
- )
-
- def _repr_compare(self, other_side: Sequence[float]) -> List[str]:
- import math
-
- if len(self.expected) != len(other_side):
- return [
- "Impossible to compare lists with different sizes.",
- f"Lengths: {len(self.expected)} and {len(other_side)}",
- ]
-
- approx_side_as_map = _recursive_list_map(self._approx_scalar, self.expected)
-
- number_of_elements = len(approx_side_as_map)
- max_abs_diff = -math.inf
- max_rel_diff = -math.inf
- different_ids = []
- for i, (approx_value, other_value) in enumerate(
- zip(approx_side_as_map, other_side)
- ):
- if approx_value != other_value:
- abs_diff = abs(approx_value.expected - other_value)
- max_abs_diff = max(max_abs_diff, abs_diff)
- if other_value == 0.0:
- max_rel_diff = math.inf
- else:
- max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
- different_ids.append(i)
-
- message_data = [
- (str(i), str(other_side[i]), str(approx_side_as_map[i]))
- for i in different_ids
- ]
-
- return _compare_approx(
- self.expected,
- message_data,
- number_of_elements,
- different_ids,
- max_abs_diff,
- max_rel_diff,
- )
-
- def __eq__(self, actual) -> bool:
- try:
- if len(actual) != len(self.expected):
- return False
- except TypeError:
- return False
- return super().__eq__(actual)
-
- def _yield_comparisons(self, actual):
- return zip(actual, self.expected)
-
- def _check_type(self) -> None:
- __tracebackhide__ = True
- for index, x in enumerate(self.expected):
- if isinstance(x, type(self.expected)):
- msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
- raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
-
-
-class ApproxScalar(ApproxBase):
- """Perform approximate comparisons where the expected value is a single number."""
-
- # Using Real should be better than this Union, but not possible yet:
- # https://github.com/python/typeshed/pull/3108
- DEFAULT_ABSOLUTE_TOLERANCE: Union[float, Decimal] = 1e-12
- DEFAULT_RELATIVE_TOLERANCE: Union[float, Decimal] = 1e-6
-
- def __repr__(self) -> str:
- """Return a string communicating both the expected value and the
- tolerance for the comparison being made.
-
- For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
- """
- # Don't show a tolerance for values that aren't compared using
- # tolerances, i.e. non-numerics and infinities. Need to call abs to
- # handle complex numbers, e.g. (inf + 1j).
- if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf(
- abs(self.expected) # type: ignore[arg-type]
- ):
- return str(self.expected)
-
- # If a sensible tolerance can't be calculated, self.tolerance will
- # raise a ValueError. In this case, display '???'.
- try:
- vetted_tolerance = f"{self.tolerance:.1e}"
- if (
- isinstance(self.expected, Complex)
- and self.expected.imag
- and not math.isinf(self.tolerance)
- ):
- vetted_tolerance += " ∠ ±180°"
- except ValueError:
- vetted_tolerance = "???"
-
- return f"{self.expected} ± {vetted_tolerance}"
-
- def __eq__(self, actual) -> bool:
- """Return whether the given value is equal to the expected value
- within the pre-specified tolerance."""
- asarray = _as_numpy_array(actual)
- if asarray is not None:
- # Call ``__eq__()`` manually to prevent infinite-recursion with
- # numpy<1.13. See #3748.
- return all(self.__eq__(a) for a in asarray.flat)
-
- # Short-circuit exact equality.
- if actual == self.expected:
- return True
-
- # If either type is non-numeric, fall back to strict equality.
- # NB: we need Complex, rather than just Number, to ensure that __abs__,
- # __sub__, and __float__ are defined.
- if not (
- isinstance(self.expected, (Complex, Decimal))
- and isinstance(actual, (Complex, Decimal))
- ):
- return False
-
- # Allow the user to control whether NaNs are considered equal to each
- # other or not. The abs() calls are for compatibility with complex
- # numbers.
- if math.isnan(abs(self.expected)): # type: ignore[arg-type]
- return self.nan_ok and math.isnan(abs(actual)) # type: ignore[arg-type]
-
- # Infinity shouldn't be approximately equal to anything but itself, but
- # if there's a relative tolerance, it will be infinite and infinity
- # will seem approximately equal to everything. The equal-to-itself
- # case would have been short circuited above, so here we can just
- # return false if the expected value is infinite. The abs() call is
- # for compatibility with complex numbers.
- if math.isinf(abs(self.expected)): # type: ignore[arg-type]
- return False
-
- # Return true if the two numbers are within the tolerance.
- result: bool = abs(self.expected - actual) <= self.tolerance
- return result
-
- # Ignore type because of https://github.com/python/mypy/issues/4266.
- __hash__ = None # type: ignore
-
- @property
- def tolerance(self):
- """Return the tolerance for the comparison.
-
- This could be either an absolute tolerance or a relative tolerance,
- depending on what the user specified or which would be larger.
- """
-
- def set_default(x, default):
- return x if x is not None else default
-
- # Figure out what the absolute tolerance should be. ``self.abs`` is
- # either None or a value specified by the user.
- absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
-
- if absolute_tolerance < 0:
- raise ValueError(
- f"absolute tolerance can't be negative: {absolute_tolerance}"
- )
- if math.isnan(absolute_tolerance):
- raise ValueError("absolute tolerance can't be NaN.")
-
- # If the user specified an absolute tolerance but not a relative one,
- # just return the absolute tolerance.
- if self.rel is None:
- if self.abs is not None:
- return absolute_tolerance
-
- # Figure out what the relative tolerance should be. ``self.rel`` is
- # either None or a value specified by the user. This is done after
- # we've made sure the user didn't ask for an absolute tolerance only,
- # because we don't want to raise errors about the relative tolerance if
- # we aren't even going to use it.
- relative_tolerance = set_default(
- self.rel, self.DEFAULT_RELATIVE_TOLERANCE
- ) * abs(self.expected)
-
- if relative_tolerance < 0:
- raise ValueError(
- f"relative tolerance can't be negative: {relative_tolerance}"
- )
- if math.isnan(relative_tolerance):
- raise ValueError("relative tolerance can't be NaN.")
-
- # Return the larger of the relative and absolute tolerances.
- return max(relative_tolerance, absolute_tolerance)
-
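
A worked example of the rule above (plain ``pytest.approx`` usage): with ``rel=1e-2`` and ``abs=1e-3`` against an expected value of ``100``, the effective tolerance is ``max(1e-2 * 100, 1e-3) == 1.0``::

    from pytest import approx

    assert 100.9 == approx(100, rel=1e-2, abs=1e-3)        # |diff| = 0.9 <= 1.0
    assert not (101.1 == approx(100, rel=1e-2, abs=1e-3))  # |diff| = 1.1 > 1.0
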
-
-class ApproxDecimal(ApproxScalar):
- """Perform approximate comparisons where the expected value is a Decimal."""
-
- DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
- DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
-
-
-def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
- """Assert that two numbers (or two ordered sequences of numbers) are equal to each other
- within some tolerance.
-
- Due to the intricacies of floating-point arithmetic (see :std:doc:`tutorial/floatingpoint`), numbers that we
- would intuitively expect to be equal are not always so::
-
- >>> 0.1 + 0.2 == 0.3
- False
-
- This problem is commonly encountered when writing tests, e.g. when making
- sure that floating-point values are what you expect them to be. One way to
- deal with this problem is to assert that two floating-point numbers are
- equal to within some appropriate tolerance::
-
- >>> abs((0.1 + 0.2) - 0.3) < 1e-6
- True
-
- However, comparisons like this are tedious to write and difficult to
- understand. Furthermore, absolute comparisons like the one above are
- usually discouraged because there's no tolerance that works well for all
- situations. ``1e-6`` is good for numbers around ``1``, but too small for
- very big numbers and too big for very small ones. It's better to express
- the tolerance as a fraction of the expected value, but relative comparisons
- like that are even more difficult to write correctly and concisely.
-
- The ``approx`` class performs floating-point comparisons using a syntax
- that's as intuitive as possible::
-
- >>> from pytest import approx
- >>> 0.1 + 0.2 == approx(0.3)
- True
-
- The same syntax also works for ordered sequences of numbers::
-
- >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
- True
-
- ``numpy`` arrays::
-
- >>> import numpy as np # doctest: +SKIP
- >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
- True
-
- And for a ``numpy`` array against a scalar::
-
- >>> import numpy as np # doctest: +SKIP
- >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
- True
-
- Only ordered sequences are supported, because ``approx`` needs
- to infer the relative position of the sequences without ambiguity. This means
- ``sets`` and other unordered sequences are not supported.
-
- Finally, dictionary *values* can also be compared::
-
- >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
- True
-
- The comparison will be true if both mappings have the same keys and their
- respective values match the expected tolerances.
-
- **Tolerances**
-
- By default, ``approx`` considers numbers within a relative tolerance of
- ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
- This treatment would lead to surprising results if the expected value was
- ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
- To handle this case less surprisingly, ``approx`` also considers numbers
- within an absolute tolerance of ``1e-12`` of its expected value to be
- equal. Infinity and NaN are special cases. Infinity is only considered
- equal to itself, regardless of the relative tolerance. NaN is not
- considered equal to anything by default, but you can make it be equal to
- itself by setting the ``nan_ok`` argument to True. (This is meant to
- facilitate comparing arrays that use NaN to mean "no data".)
-
- Both the relative and absolute tolerances can be changed by passing
- arguments to the ``approx`` constructor::
-
- >>> 1.0001 == approx(1)
- False
- >>> 1.0001 == approx(1, rel=1e-3)
- True
- >>> 1.0001 == approx(1, abs=1e-3)
- True
-
- If you specify ``abs`` but not ``rel``, the comparison will not consider
- the relative tolerance at all. In other words, two numbers that are within
- the default relative tolerance of ``1e-6`` will still be considered unequal
- if they exceed the specified absolute tolerance. If you specify both
- ``abs`` and ``rel``, the numbers will be considered equal if either
- tolerance is met::
-
- >>> 1 + 1e-8 == approx(1)
- True
- >>> 1 + 1e-8 == approx(1, abs=1e-12)
- False
- >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
- True
-
- You can also use ``approx`` to compare nonnumeric types, or dicts and
- sequences containing nonnumeric types, in which case it falls back to
- strict equality. This can be useful for comparing dicts and sequences that
- can contain optional values::
-
- >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
- True
- >>> [None, 1.0000005] == approx([None,1])
- True
- >>> ["foo", 1.0000005] == approx([None,1])
- False
-
- If you're thinking about using ``approx``, then you might want to know how
- it compares to other good ways of comparing floating-point numbers. All of
- these algorithms are based on relative and absolute tolerances and should
- agree for the most part, but they do have meaningful differences:
-
- - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
- tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
- tolerance is met. Because the relative tolerance is calculated w.r.t.
- both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
- ``b`` is a "reference value"). You have to specify an absolute tolerance
- if you want to compare to ``0.0`` because there is no tolerance by
- default. More information: :py:func:`math.isclose`.
-
- - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
- between ``a`` and ``b`` is less than the sum of the relative tolerance
- w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
- is only calculated w.r.t. ``b``, this test is asymmetric and you can
- think of ``b`` as the reference value. Support for comparing sequences
- is provided by :py:func:`numpy.allclose`. More information:
- :std:doc:`numpy:reference/generated/numpy.isclose`.
-
- - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
- are within an absolute tolerance of ``1e-7``. No relative tolerance is
- considered, so this function is not appropriate for very large or very
- small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
- and it's ugly because it doesn't follow PEP8. More information:
- :py:meth:`unittest.TestCase.assertAlmostEqual`.
-
- - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
- tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
- Because the relative tolerance is only calculated w.r.t. ``b``, this test
- is asymmetric and you can think of ``b`` as the reference value. In the
- special case that you explicitly specify an absolute tolerance but not a
- relative tolerance, only the absolute tolerance is considered.
-
- .. note::
-
- ``approx`` can handle numpy arrays, but we recommend the
- specialised test helpers in :std:doc:`numpy:reference/routines.testing`
- if you need support for comparisons, NaNs, or ULP-based tolerances.
-
- .. warning::
-
- .. versionchanged:: 3.2
-
- In order to avoid inconsistent behavior, :py:exc:`TypeError` is
- raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
- The example below illustrates the problem::
-
- assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
- assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
-
- In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
- to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for
- the comparison. This is because the call hierarchy of rich comparisons
- follows a fixed behavior. More information: :py:meth:`object.__ge__`
-
- .. versionchanged:: 3.7.1
- ``approx`` raises ``TypeError`` when it encounters a dict value or
- sequence element of nonnumeric type.
-
- .. versionchanged:: 6.1.0
- ``approx`` falls back to strict equality for nonnumeric types instead
- of raising ``TypeError``.
- """
-
- # Delegate the comparison to a class that knows how to deal with the type
- # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
- #
- # The primary responsibility of these classes is to implement ``__eq__()``
- # and ``__repr__()``. The former is used to actually check if some
- # "actual" value is equivalent to the given expected value within the
- # allowed tolerance. The latter is used to show the user the expected
- # value and tolerance, in the case that a test failed.
- #
- # The actual logic for making approximate comparisons can be found in
- # ApproxScalar, which is used to compare individual numbers. All of the
- # other Approx classes eventually delegate to this class. The ApproxBase
- # class provides some convenient methods and overloads, but isn't really
- # essential.
-
- __tracebackhide__ = True
-
- if isinstance(expected, Decimal):
- cls: Type[ApproxBase] = ApproxDecimal
- elif isinstance(expected, Mapping):
- cls = ApproxMapping
- elif _is_numpy_array(expected):
- expected = _as_numpy_array(expected)
- cls = ApproxNumpy
- elif (
- hasattr(expected, "__getitem__")
- and isinstance(expected, Sized)
- # Type ignored because the error is wrong -- not unreachable.
- and not isinstance(expected, STRING_TYPES) # type: ignore[unreachable]
- ):
- cls = ApproxSequenceLike
- elif (
- isinstance(expected, Collection)
- # Type ignored because the error is wrong -- not unreachable.
- and not isinstance(expected, STRING_TYPES) # type: ignore[unreachable]
- ):
- msg = f"pytest.approx() only supports ordered sequences, but got: {repr(expected)}"
- raise TypeError(msg)
- else:
- cls = ApproxScalar
-
- return cls(expected, rel, abs, nan_ok)
-
-
-def _is_numpy_array(obj: object) -> bool:
- """
- Return true if the given object is implicitly convertible to ndarray,
- and numpy is already imported.
- """
- return _as_numpy_array(obj) is not None
-
-
-def _as_numpy_array(obj: object) -> Optional["ndarray"]:
- """
- Return an ndarray if the given object is implicitly convertible to ndarray,
- and numpy is already imported, otherwise None.
- """
- import sys
-
- np: Any = sys.modules.get("numpy")
- if np is not None:
- # avoid infinite recursion on numpy scalars, which have __array__
- if np.isscalar(obj):
- return None
- elif isinstance(obj, np.ndarray):
- return obj
- elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
- return np.asarray(obj)
- return None
-
-
-# builtin pytest.raises helper
-
-E = TypeVar("E", bound=BaseException)
-
-
-@overload
-def raises(
- expected_exception: Union[Type[E], Tuple[Type[E], ...]],
- *,
- match: Optional[Union[str, Pattern[str]]] = ...,
-) -> "RaisesContext[E]":
- ...
-
-
-@overload
-def raises(
- expected_exception: Union[Type[E], Tuple[Type[E], ...]],
- func: Callable[..., Any],
- *args: Any,
- **kwargs: Any,
-) -> _pytest._code.ExceptionInfo[E]:
- ...
-
-
-def raises(
- expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
-) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
- r"""Assert that a code block/function call raises ``expected_exception``
- or raise a failure exception otherwise.
-
- :kwparam match:
- If specified, a string containing a regular expression,
- or a regular expression object, that is tested against the string
- representation of the exception using :py:func:`re.search`. To match a literal
- string that may contain :std:ref:`special characters <re-syntax>`, the pattern can
- first be escaped with :py:func:`re.escape`.
-
- (This is only used when :py:func:`pytest.raises` is used as a context manager,
- and passed through to the function otherwise.
- When using :py:func:`pytest.raises` as a function, you can use:
- ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
-
- .. currentmodule:: _pytest._code
-
- Use ``pytest.raises`` as a context manager, which will capture the exception of the given
- type::
-
- >>> import pytest
- >>> with pytest.raises(ZeroDivisionError):
- ... 1/0
-
- If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
- above), or no exception at all, the check will fail instead.
-
- You can also use the keyword argument ``match`` to assert that the
- exception matches a text or regex::
-
- >>> with pytest.raises(ValueError, match='must be 0 or None'):
- ... raise ValueError("value must be 0 or None")
-
- >>> with pytest.raises(ValueError, match=r'must be \d+$'):
- ... raise ValueError("value must be 42")
-
- The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
- details of the captured exception::
-
- >>> with pytest.raises(ValueError) as exc_info:
- ... raise ValueError("value must be 42")
- >>> assert exc_info.type is ValueError
- >>> assert exc_info.value.args[0] == "value must be 42"
-
- .. note::
-
- When using ``pytest.raises`` as a context manager, it's worthwhile to
- note that normal context manager rules apply and that the exception
- raised *must* be the final line in the scope of the context manager.
- Lines of code after that, within the scope of the context manager, will
- not be executed. For example::
-
- >>> value = 15
- >>> with pytest.raises(ValueError) as exc_info:
- ... if value > 10:
- ... raise ValueError("value must be <= 10")
- ... assert exc_info.type is ValueError # this will not execute
-
- Instead, the following approach must be taken (note the difference in
- scope)::
-
- >>> with pytest.raises(ValueError) as exc_info:
- ... if value > 10:
- ... raise ValueError("value must be <= 10")
- ...
- >>> assert exc_info.type is ValueError
-
- **Using with** ``pytest.mark.parametrize``
-
- When using :ref:`pytest.mark.parametrize ref`
- it is possible to parametrize tests such that
- some runs raise an exception and others do not.
-
- See :ref:`parametrizing_conditional_raising` for an example.
-
- **Legacy form**
-
- It is possible to specify a callable by passing a to-be-called lambda::
-
- >>> raises(ZeroDivisionError, lambda: 1/0)
- <ExceptionInfo ...>
-
- or you can specify an arbitrary callable with arguments::
-
- >>> def f(x): return 1/x
- ...
- >>> raises(ZeroDivisionError, f, 0)
- <ExceptionInfo ...>
- >>> raises(ZeroDivisionError, f, x=0)
- <ExceptionInfo ...>
-
- The form above is fully supported but discouraged for new code because the
- context manager form is regarded as more readable and less error-prone.
-
- .. note::
- Similar to caught exception objects in Python, explicitly clearing
- local references to returned ``ExceptionInfo`` objects can
- help the Python interpreter speed up its garbage collection.
-
- Clearing those references breaks a reference cycle
- (``ExceptionInfo`` --> caught exception --> frame stack raising
- the exception --> current frame stack --> local variables -->
- ``ExceptionInfo``) which makes Python keep all objects referenced
- from that cycle (including all local variables in the current
- frame) alive until the next cyclic garbage collection run.
- More detailed information can be found in the official Python
- documentation for :ref:`the try statement <python:try>`.
- """
- __tracebackhide__ = True
-
- if isinstance(expected_exception, type):
- expected_exceptions: Tuple[Type[E], ...] = (expected_exception,)
- else:
- expected_exceptions = expected_exception
- for exc in expected_exceptions:
- if not isinstance(exc, type) or not issubclass(exc, BaseException):
- msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable]
- not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
- raise TypeError(msg.format(not_a))
-
- message = f"DID NOT RAISE {expected_exception}"
-
- if not args:
- match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None)
- if kwargs:
- msg = "Unexpected keyword arguments passed to pytest.raises: "
- msg += ", ".join(sorted(kwargs))
- msg += "\nUse context-manager form instead?"
- raise TypeError(msg)
- return RaisesContext(expected_exception, message, match)
- else:
- func = args[0]
- if not callable(func):
- raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
- try:
- func(*args[1:], **kwargs)
- except expected_exception as e:
- # We just caught the exception - there is a traceback.
- assert e.__traceback__ is not None
- return _pytest._code.ExceptionInfo.from_exc_info(
- (type(e), e, e.__traceback__)
- )
- fail(message)
-
-
-# This doesn't work with mypy for now. Use fail.Exception instead.
-raises.Exception = fail.Exception # type: ignore
-
-
-@final
-class RaisesContext(Generic[E]):
- def __init__(
- self,
- expected_exception: Union[Type[E], Tuple[Type[E], ...]],
- message: str,
- match_expr: Optional[Union[str, Pattern[str]]] = None,
- ) -> None:
- self.expected_exception = expected_exception
- self.message = message
- self.match_expr = match_expr
- self.excinfo: Optional[_pytest._code.ExceptionInfo[E]] = None
-
- def __enter__(self) -> _pytest._code.ExceptionInfo[E]:
- self.excinfo = _pytest._code.ExceptionInfo.for_later()
- return self.excinfo
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> bool:
- __tracebackhide__ = True
- if exc_type is None:
- fail(self.message)
- assert self.excinfo is not None
- if not issubclass(exc_type, self.expected_exception):
- return False
- # Cast to narrow the exception type now that it's verified.
- exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb))
- self.excinfo.fill_unfilled(exc_info)
- if self.match_expr is not None:
- self.excinfo.match(self.match_expr)
- return True
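A minimal sketch (not part of the deleted file) of how the ``__exit__`` logic above behaves for a non-matching exception type: returning ``False`` means the exception is not suppressed and propagates out of the ``with`` block.

import pytest

# A KeyError raised inside pytest.raises(ValueError) is not swallowed:
# RaisesContext.__exit__ sees a non-matching type and returns False,
# so the exception propagates.
try:
    with pytest.raises(ValueError):
        raise KeyError("boom")
except KeyError:
    print("KeyError propagated, as expected")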
diff --git a/contrib/python/pytest/py3/_pytest/python_path.py b/contrib/python/pytest/py3/_pytest/python_path.py
deleted file mode 100644
index cceabbca12..0000000000
--- a/contrib/python/pytest/py3/_pytest/python_path.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import sys
-
-import pytest
-from pytest import Config
-from pytest import Parser
-
-
-def pytest_addoption(parser: Parser) -> None:
- parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[])
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_load_initial_conftests(early_config: Config) -> None:
- # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]`
- for path in reversed(early_config.getini("pythonpath")):
- sys.path.insert(0, str(path))
-
-
-@pytest.hookimpl(trylast=True)
-def pytest_unconfigure(config: Config) -> None:
- for path in config.getini("pythonpath"):
- path_str = str(path)
- if path_str in sys.path:
- sys.path.remove(path_str)
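A minimal sketch (not part of the deleted file) of why ``pytest_load_initial_conftests`` above iterates in reverse: inserting the reversed list at index 0 keeps the configured order at the front of ``sys.path``.

# `pythonpath = a b` should give sys.path == ["a", "b", <existing entries>...]
configured = ["a", "b"]
search_path = ["x", "y", "z"]  # stand-in for the existing sys.path
for entry in reversed(configured):
    search_path.insert(0, entry)
assert search_path == ["a", "b", "x", "y", "z"]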
diff --git a/contrib/python/pytest/py3/_pytest/recwarn.py b/contrib/python/pytest/py3/_pytest/recwarn.py
deleted file mode 100644
index 175b571a80..0000000000
--- a/contrib/python/pytest/py3/_pytest/recwarn.py
+++ /dev/null
@@ -1,296 +0,0 @@
-"""Record warnings during test function execution."""
-import re
-import warnings
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import Generator
-from typing import Iterator
-from typing import List
-from typing import Optional
-from typing import overload
-from typing import Pattern
-from typing import Tuple
-from typing import Type
-from typing import TypeVar
-from typing import Union
-
-from _pytest.compat import final
-from _pytest.deprecated import check_ispytest
-from _pytest.deprecated import WARNS_NONE_ARG
-from _pytest.fixtures import fixture
-from _pytest.outcomes import fail
-
-
-T = TypeVar("T")
-
-
-@fixture
-def recwarn() -> Generator["WarningsRecorder", None, None]:
- """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
-
- See https://docs.python.org/library/how-to/capture-warnings.html for information
- on warning categories.
- """
- wrec = WarningsRecorder(_ispytest=True)
- with wrec:
- warnings.simplefilter("default")
- yield wrec
-
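A short usage sketch for the ``recwarn`` fixture defined above (illustrative; the test function name is made up):

import warnings

def test_emits_user_warning(recwarn):
    warnings.warn("this API is deprecated", UserWarning)
    w = recwarn.pop(UserWarning)   # first recorded warning of that category
    assert "deprecated" in str(w.message)
    assert len(recwarn) == 0       # pop() removed the only recorded warning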
-
-@overload
-def deprecated_call(
- *, match: Optional[Union[str, Pattern[str]]] = ...
-) -> "WarningsRecorder":
- ...
-
-
-@overload
-def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
- ...
-
-
-def deprecated_call(
- func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any
-) -> Union["WarningsRecorder", Any]:
- """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``.
-
- This function can be used as a context manager::
-
- >>> import warnings
- >>> def api_call_v2():
- ... warnings.warn('use v3 of this api', DeprecationWarning)
- ... return 200
-
- >>> import pytest
- >>> with pytest.deprecated_call():
- ... assert api_call_v2() == 200
-
- It can also be used by passing a function and ``*args`` and ``**kwargs``,
- in which case it will ensure calling ``func(*args, **kwargs)`` produces one of
- the warning types above. The return value is the return value of the function.
-
- In the context manager form you may use the keyword argument ``match`` to assert
- that the warning matches a text or regex.
-
- The context manager produces a list of :class:`warnings.WarningMessage` objects,
- one for each warning raised.
- """
- __tracebackhide__ = True
- if func is not None:
- args = (func,) + args
- return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs)
-
-
-@overload
-def warns(
- expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = ...,
- *,
- match: Optional[Union[str, Pattern[str]]] = ...,
-) -> "WarningsChecker":
- ...
-
-
-@overload
-def warns(
- expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]],
- func: Callable[..., T],
- *args: Any,
- **kwargs: Any,
-) -> T:
- ...
-
-
-def warns(
- expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = Warning,
- *args: Any,
- match: Optional[Union[str, Pattern[str]]] = None,
- **kwargs: Any,
-) -> Union["WarningsChecker", Any]:
- r"""Assert that code raises a particular class of warning.
-
- Specifically, the parameter ``expected_warning`` can be a warning class or
- sequence of warning classes, and the code inside the ``with`` block must issue a warning of that class or
- classes.
-
- This helper produces a list of :class:`warnings.WarningMessage` objects,
- one for each warning raised.
-
- This function can be used as a context manager, or any of the other ways
- :func:`pytest.raises` can be used::
-
- >>> import pytest
- >>> with pytest.warns(RuntimeWarning):
- ... warnings.warn("my warning", RuntimeWarning)
-
- In the context manager form you may use the keyword argument ``match`` to assert
- that the warning matches a text or regex::
-
- >>> with pytest.warns(UserWarning, match='must be 0 or None'):
- ... warnings.warn("value must be 0 or None", UserWarning)
-
- >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
- ... warnings.warn("value must be 42", UserWarning)
-
- >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
- ... warnings.warn("this is not here", UserWarning)
- Traceback (most recent call last):
- ...
- Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...
-
- """
- __tracebackhide__ = True
- if not args:
- if kwargs:
- msg = "Unexpected keyword arguments passed to pytest.warns: "
- msg += ", ".join(sorted(kwargs))
- msg += "\nUse context-manager form instead?"
- raise TypeError(msg)
- return WarningsChecker(expected_warning, match_expr=match, _ispytest=True)
- else:
- func = args[0]
- if not callable(func):
- raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
- with WarningsChecker(expected_warning, _ispytest=True):
- return func(*args[1:], **kwargs)
-
-
-class WarningsRecorder(warnings.catch_warnings):
- """A context manager to record raised warnings.
-
- Adapted from `warnings.catch_warnings`.
- """
-
- def __init__(self, *, _ispytest: bool = False) -> None:
- check_ispytest(_ispytest)
- # Type ignored due to the way typeshed handles warnings.catch_warnings.
- super().__init__(record=True) # type: ignore[call-arg]
- self._entered = False
- self._list: List[warnings.WarningMessage] = []
-
- @property
- def list(self) -> List["warnings.WarningMessage"]:
- """The list of recorded warnings."""
- return self._list
-
- def __getitem__(self, i: int) -> "warnings.WarningMessage":
- """Get a recorded warning by index."""
- return self._list[i]
-
- def __iter__(self) -> Iterator["warnings.WarningMessage"]:
- """Iterate through the recorded warnings."""
- return iter(self._list)
-
- def __len__(self) -> int:
- """The number of recorded warnings."""
- return len(self._list)
-
- def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage":
- """Pop the first recorded warning, raise exception if not exists."""
- for i, w in enumerate(self._list):
- if issubclass(w.category, cls):
- return self._list.pop(i)
- __tracebackhide__ = True
- raise AssertionError("%r not found in warning list" % cls)
-
- def clear(self) -> None:
- """Clear the list of recorded warnings."""
- self._list[:] = []
-
- # Type ignored because it doesn't exactly match warnings.catch_warnings.__enter__
- # -- it returns a List but we only emulate one.
- def __enter__(self) -> "WarningsRecorder": # type: ignore
- if self._entered:
- __tracebackhide__ = True
- raise RuntimeError("Cannot enter %r twice" % self)
- _list = super().__enter__()
- # record=True means the return value is not None.
- assert _list is not None
- self._list = _list
- warnings.simplefilter("always")
- return self
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- if not self._entered:
- __tracebackhide__ = True
- raise RuntimeError("Cannot exit %r without entering first" % self)
-
- super().__exit__(exc_type, exc_val, exc_tb)
-
- # Built-in catch_warnings does not reset entered state so we do it
- # manually here for this context manager to become reusable.
- self._entered = False
-
-
-@final
-class WarningsChecker(WarningsRecorder):
- def __init__(
- self,
- expected_warning: Optional[
- Union[Type[Warning], Tuple[Type[Warning], ...]]
- ] = Warning,
- match_expr: Optional[Union[str, Pattern[str]]] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- super().__init__(_ispytest=True)
-
- msg = "exceptions must be derived from Warning, not %s"
- if expected_warning is None:
- warnings.warn(WARNS_NONE_ARG, stacklevel=4)
- expected_warning_tup = None
- elif isinstance(expected_warning, tuple):
- for exc in expected_warning:
- if not issubclass(exc, Warning):
- raise TypeError(msg % type(exc))
- expected_warning_tup = expected_warning
- elif issubclass(expected_warning, Warning):
- expected_warning_tup = (expected_warning,)
- else:
- raise TypeError(msg % type(expected_warning))
-
- self.expected_warning = expected_warning_tup
- self.match_expr = match_expr
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- super().__exit__(exc_type, exc_val, exc_tb)
-
- __tracebackhide__ = True
-
- # only check if we're not currently handling an exception
- if exc_type is None and exc_val is None and exc_tb is None:
- if self.expected_warning is not None:
- if not any(issubclass(r.category, self.expected_warning) for r in self):
- __tracebackhide__ = True
- fail(
- "DID NOT WARN. No warnings of type {} were emitted. "
- "The list of emitted warnings is: {}.".format(
- self.expected_warning, [each.message for each in self]
- )
- )
- elif self.match_expr is not None:
- for r in self:
- if issubclass(r.category, self.expected_warning):
- if re.compile(self.match_expr).search(str(r.message)):
- break
- else:
- fail(
- "DID NOT WARN. No warnings of type {} matching"
- " ('{}') were emitted. The list of emitted warnings"
- " is: {}.".format(
- self.expected_warning,
- self.match_expr,
- [each.message for each in self],
- )
- )
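A small sketch of the functional form handled by ``warns`` above, where the callable is invoked inside a ``WarningsChecker`` and its return value is passed through (the ``emit`` helper is hypothetical):

import warnings
import pytest

def emit(msg):
    warnings.warn(msg, UserWarning)
    return 42

# warns() calls emit("something changed") inside a WarningsChecker and
# returns whatever emit returned.
result = pytest.warns(UserWarning, emit, "something changed")
assert result == 42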
diff --git a/contrib/python/pytest/py3/_pytest/reports.py b/contrib/python/pytest/py3/_pytest/reports.py
deleted file mode 100644
index 725fdf6173..0000000000
--- a/contrib/python/pytest/py3/_pytest/reports.py
+++ /dev/null
@@ -1,599 +0,0 @@
-import os
-from io import StringIO
-from pprint import pprint
-from typing import Any
-from typing import cast
-from typing import Dict
-from typing import Iterable
-from typing import Iterator
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import attr
-
-from _pytest._code.code import ExceptionChainRepr
-from _pytest._code.code import ExceptionInfo
-from _pytest._code.code import ExceptionRepr
-from _pytest._code.code import ReprEntry
-from _pytest._code.code import ReprEntryNative
-from _pytest._code.code import ReprExceptionInfo
-from _pytest._code.code import ReprFileLocation
-from _pytest._code.code import ReprFuncArgs
-from _pytest._code.code import ReprLocals
-from _pytest._code.code import ReprTraceback
-from _pytest._code.code import TerminalRepr
-from _pytest._io import TerminalWriter
-from _pytest.compat import final
-from _pytest.config import Config
-from _pytest.nodes import Collector
-from _pytest.nodes import Item
-from _pytest.outcomes import skip
-
-if TYPE_CHECKING:
- from typing import NoReturn
- from typing_extensions import Literal
-
- from _pytest.runner import CallInfo
-
-
-def getworkerinfoline(node):
- try:
- return node._workerinfocache
- except AttributeError:
- d = node.workerinfo
- ver = "%s.%s.%s" % d["version_info"][:3]
- node._workerinfocache = s = "[{}] {} -- Python {} {}".format(
- d["id"], d["sysplatform"], ver, d["executable"]
- )
- return s
-
-
-_R = TypeVar("_R", bound="BaseReport")
-
-
-class BaseReport:
- when: Optional[str]
- location: Optional[Tuple[str, Optional[int], str]]
- longrepr: Union[
- None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
- ]
- sections: List[Tuple[str, str]]
- nodeid: str
- outcome: "Literal['passed', 'failed', 'skipped']"
-
- def __init__(self, **kw: Any) -> None:
- self.__dict__.update(kw)
-
- if TYPE_CHECKING:
- # Can have arbitrary fields given to __init__().
- def __getattr__(self, key: str) -> Any:
- ...
-
- def toterminal(self, out: TerminalWriter) -> None:
- if hasattr(self, "node"):
- worker_info = getworkerinfoline(self.node)
- if worker_info:
- out.line(worker_info)
-
- longrepr = self.longrepr
- if longrepr is None:
- return
-
- if hasattr(longrepr, "toterminal"):
- longrepr_terminal = cast(TerminalRepr, longrepr)
- longrepr_terminal.toterminal(out)
- else:
- try:
- s = str(longrepr)
- except UnicodeEncodeError:
- s = "<unprintable longrepr>"
- out.line(s)
-
- def get_sections(self, prefix: str) -> Iterator[Tuple[str, str]]:
- for name, content in self.sections:
- if name.startswith(prefix):
- yield prefix, content
-
- @property
- def longreprtext(self) -> str:
- """Read-only property that returns the full string representation of
- ``longrepr``.
-
- .. versionadded:: 3.0
- """
- file = StringIO()
- tw = TerminalWriter(file)
- tw.hasmarkup = False
- self.toterminal(tw)
- exc = file.getvalue()
- return exc.strip()
-
- @property
- def caplog(self) -> str:
- """Return captured log lines, if log capturing is enabled.
-
- .. versionadded:: 3.5
- """
- return "\n".join(
- content for (prefix, content) in self.get_sections("Captured log")
- )
-
- @property
- def capstdout(self) -> str:
- """Return captured text from stdout, if capturing is enabled.
-
- .. versionadded:: 3.0
- """
- return "".join(
- content for (prefix, content) in self.get_sections("Captured stdout")
- )
-
- @property
- def capstderr(self) -> str:
- """Return captured text from stderr, if capturing is enabled.
-
- .. versionadded:: 3.0
- """
- return "".join(
- content for (prefix, content) in self.get_sections("Captured stderr")
- )
-
- @property
- def passed(self) -> bool:
- """Whether the outcome is passed."""
- return self.outcome == "passed"
-
- @property
- def failed(self) -> bool:
- """Whether the outcome is failed."""
- return self.outcome == "failed"
-
- @property
- def skipped(self) -> bool:
- """Whether the outcome is skipped."""
- return self.outcome == "skipped"
-
- @property
- def fspath(self) -> str:
- """The path portion of the reported node, as a string."""
- return self.nodeid.split("::")[0]
-
- @property
- def count_towards_summary(self) -> bool:
- """**Experimental** Whether this report should be counted towards the
- totals shown at the end of the test session: "1 passed, 1 failure, etc".
-
- .. note::
-
- This function is considered **experimental**, so beware that it is subject to changes
- even in patch releases.
- """
- return True
-
- @property
- def head_line(self) -> Optional[str]:
- """**Experimental** The head line shown with longrepr output for this
- report, more commonly during traceback representation during
- failures::
-
- ________ Test.foo ________
-
-
- In the example above, the head_line is "Test.foo".
-
- .. note::
-
- This function is considered **experimental**, so beware that it is subject to changes
- even in patch releases.
- """
- if self.location is not None:
- fspath, lineno, domain = self.location
- return domain
- return None
-
- def _get_verbose_word(self, config: Config):
- _category, _short, verbose = config.hook.pytest_report_teststatus(
- report=self, config=config
- )
- return verbose
-
- def _to_json(self) -> Dict[str, Any]:
- """Return the contents of this report as a dict of builtin entries,
- suitable for serialization.
-
- This was originally the serialize_report() function from xdist (ca03269).
-
- Experimental method.
- """
- return _report_to_json(self)
-
- @classmethod
- def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R:
- """Create either a TestReport or CollectReport, depending on the calling class.
-
- It is the caller's responsibility to know which class to pass here.
-
- This was originally the serialize_report() function from xdist (ca03269).
-
- Experimental method.
- """
- kwargs = _report_kwargs_from_json(reportdict)
- return cls(**kwargs)
-
-
-def _report_unserialization_failure(
- type_name: str, report_class: Type[BaseReport], reportdict
-) -> "NoReturn":
- url = "https://github.com/pytest-dev/pytest/issues"
- stream = StringIO()
- pprint("-" * 100, stream=stream)
- pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
- pprint("report_name: %s" % report_class, stream=stream)
- pprint(reportdict, stream=stream)
- pprint("Please report this bug at %s" % url, stream=stream)
- pprint("-" * 100, stream=stream)
- raise RuntimeError(stream.getvalue())
-
-
-@final
-class TestReport(BaseReport):
- """Basic test report object (also used for setup and teardown calls if
- they fail).
-
- Reports can contain arbitrary extra attributes.
- """
-
- __test__ = False
-
- def __init__(
- self,
- nodeid: str,
- location: Tuple[str, Optional[int], str],
- keywords: Mapping[str, Any],
- outcome: "Literal['passed', 'failed', 'skipped']",
- longrepr: Union[
- None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
- ],
- when: "Literal['setup', 'call', 'teardown']",
- sections: Iterable[Tuple[str, str]] = (),
- duration: float = 0,
- user_properties: Optional[Iterable[Tuple[str, object]]] = None,
- **extra,
- ) -> None:
- #: Normalized collection nodeid.
- self.nodeid = nodeid
-
- #: A (filesystempath, lineno, domaininfo) tuple indicating the
- #: actual location of a test item - it might be different from the
- #: collected one e.g. if a method is inherited from a different module.
- self.location: Tuple[str, Optional[int], str] = location
-
- #: A name -> value dictionary containing all keywords and
- #: markers associated with a test invocation.
- self.keywords = keywords
-
- #: Test outcome, always one of "passed", "failed", "skipped".
- self.outcome = outcome
-
- #: None or a failure representation.
- self.longrepr = longrepr
-
- #: One of 'setup', 'call', 'teardown' to indicate runtest phase.
- self.when = when
-
- #: User properties is a list of tuples (name, value) that holds user
- #: defined properties of the test.
- self.user_properties = list(user_properties or [])
-
- #: Tuples of str ``(heading, content)`` with extra information
- #: for the test report. Used by pytest to add text captured
- #: from ``stdout``, ``stderr``, and intercepted logging events. May
- #: be used by other plugins to add arbitrary information to reports.
- self.sections = list(sections)
-
- #: Time it took to run just the test.
- self.duration = duration
-
- self.__dict__.update(extra)
-
- def __repr__(self) -> str:
- return "<{} {!r} when={!r} outcome={!r}>".format(
- self.__class__.__name__, self.nodeid, self.when, self.outcome
- )
-
- @classmethod
- def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport":
- """Create and fill a TestReport with standard item and call info."""
- when = call.when
- # Remove "collect" from the Literal type -- only for collection calls.
- assert when != "collect"
- duration = call.duration
- keywords = {x: 1 for x in item.keywords}
- excinfo = call.excinfo
- sections = []
- if not call.excinfo:
- outcome: Literal["passed", "failed", "skipped"] = "passed"
- longrepr: Union[
- None,
- ExceptionInfo[BaseException],
- Tuple[str, int, str],
- str,
- TerminalRepr,
- ] = None
- else:
- if not isinstance(excinfo, ExceptionInfo):
- outcome = "failed"
- longrepr = excinfo
- elif isinstance(excinfo.value, skip.Exception):
- outcome = "skipped"
- r = excinfo._getreprcrash()
- if excinfo.value._use_item_location:
- path, line = item.reportinfo()[:2]
- assert line is not None
- longrepr = os.fspath(path), line + 1, r.message
- else:
- longrepr = (str(r.path), r.lineno, r.message)
- else:
- outcome = "failed"
- if call.when == "call":
- longrepr = item.repr_failure(excinfo)
- else: # exception in setup or teardown
- longrepr = item._repr_failure_py(
- excinfo, style=item.config.getoption("tbstyle", "auto")
- )
- for rwhen, key, content in item._report_sections:
- sections.append((f"Captured {key} {rwhen}", content))
- return cls(
- item.nodeid,
- item.location,
- keywords,
- outcome,
- longrepr,
- when,
- sections,
- duration,
- user_properties=item.user_properties,
- )
-
-
-@final
-class CollectReport(BaseReport):
- """Collection report object.
-
- Reports can contain arbitrary extra attributes.
- """
-
- when = "collect"
-
- def __init__(
- self,
- nodeid: str,
- outcome: "Literal['passed', 'failed', 'skipped']",
- longrepr: Union[
- None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
- ],
- result: Optional[List[Union[Item, Collector]]],
- sections: Iterable[Tuple[str, str]] = (),
- **extra,
- ) -> None:
- #: Normalized collection nodeid.
- self.nodeid = nodeid
-
- #: Test outcome, always one of "passed", "failed", "skipped".
- self.outcome = outcome
-
- #: None or a failure representation.
- self.longrepr = longrepr
-
- #: The collected items and collection nodes.
- self.result = result or []
-
- #: Tuples of str ``(heading, content)`` with extra information
- #: for the test report. Used by pytest to add text captured
- #: from ``stdout``, ``stderr``, and intercepted logging events. May
- #: be used by other plugins to add arbitrary information to reports.
- self.sections = list(sections)
-
- self.__dict__.update(extra)
-
- @property
- def location(self):
- return (self.fspath, None, self.fspath)
-
- def __repr__(self) -> str:
- return "<CollectReport {!r} lenresult={} outcome={!r}>".format(
- self.nodeid, len(self.result), self.outcome
- )
-
-
-class CollectErrorRepr(TerminalRepr):
- def __init__(self, msg: str) -> None:
- self.longrepr = msg
-
- def toterminal(self, out: TerminalWriter) -> None:
- out.line(self.longrepr, red=True)
-
-
-def pytest_report_to_serializable(
- report: Union[CollectReport, TestReport]
-) -> Optional[Dict[str, Any]]:
- if isinstance(report, (TestReport, CollectReport)):
- data = report._to_json()
- data["$report_type"] = report.__class__.__name__
- return data
- # TODO: Check if this is actually reachable.
- return None # type: ignore[unreachable]
-
-
-def pytest_report_from_serializable(
- data: Dict[str, Any],
-) -> Optional[Union[CollectReport, TestReport]]:
- if "$report_type" in data:
- if data["$report_type"] == "TestReport":
- return TestReport._from_json(data)
- elif data["$report_type"] == "CollectReport":
- return CollectReport._from_json(data)
- assert False, "Unknown report_type unserialize data: {}".format(
- data["$report_type"]
- )
- return None
-
-
-def _report_to_json(report: BaseReport) -> Dict[str, Any]:
- """Return the contents of this report as a dict of builtin entries,
- suitable for serialization.
-
- This was originally the serialize_report() function from xdist (ca03269).
- """
-
- def serialize_repr_entry(
- entry: Union[ReprEntry, ReprEntryNative]
- ) -> Dict[str, Any]:
- data = attr.asdict(entry)
- for key, value in data.items():
- if hasattr(value, "__dict__"):
- data[key] = attr.asdict(value)
- entry_data = {"type": type(entry).__name__, "data": data}
- return entry_data
-
- def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]:
- result = attr.asdict(reprtraceback)
- result["reprentries"] = [
- serialize_repr_entry(x) for x in reprtraceback.reprentries
- ]
- return result
-
- def serialize_repr_crash(
- reprcrash: Optional[ReprFileLocation],
- ) -> Optional[Dict[str, Any]]:
- if reprcrash is not None:
- return attr.asdict(reprcrash)
- else:
- return None
-
- def serialize_exception_longrepr(rep: BaseReport) -> Dict[str, Any]:
- assert rep.longrepr is not None
- # TODO: Investigate whether the duck typing is really necessary here.
- longrepr = cast(ExceptionRepr, rep.longrepr)
- result: Dict[str, Any] = {
- "reprcrash": serialize_repr_crash(longrepr.reprcrash),
- "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback),
- "sections": longrepr.sections,
- }
- if isinstance(longrepr, ExceptionChainRepr):
- result["chain"] = []
- for repr_traceback, repr_crash, description in longrepr.chain:
- result["chain"].append(
- (
- serialize_repr_traceback(repr_traceback),
- serialize_repr_crash(repr_crash),
- description,
- )
- )
- else:
- result["chain"] = None
- return result
-
- d = report.__dict__.copy()
- if hasattr(report.longrepr, "toterminal"):
- if hasattr(report.longrepr, "reprtraceback") and hasattr(
- report.longrepr, "reprcrash"
- ):
- d["longrepr"] = serialize_exception_longrepr(report)
- else:
- d["longrepr"] = str(report.longrepr)
- else:
- d["longrepr"] = report.longrepr
- for name in d:
- if isinstance(d[name], os.PathLike):
- d[name] = os.fspath(d[name])
- elif name == "result":
- d[name] = None # for now
- return d
-
-
-def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]:
- """Return **kwargs that can be used to construct a TestReport or
- CollectReport instance.
-
- This was originally the serialize_report() function from xdist (ca03269).
- """
-
- def deserialize_repr_entry(entry_data):
- data = entry_data["data"]
- entry_type = entry_data["type"]
- if entry_type == "ReprEntry":
- reprfuncargs = None
- reprfileloc = None
- reprlocals = None
- if data["reprfuncargs"]:
- reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
- if data["reprfileloc"]:
- reprfileloc = ReprFileLocation(**data["reprfileloc"])
- if data["reprlocals"]:
- reprlocals = ReprLocals(data["reprlocals"]["lines"])
-
- reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry(
- lines=data["lines"],
- reprfuncargs=reprfuncargs,
- reprlocals=reprlocals,
- reprfileloc=reprfileloc,
- style=data["style"],
- )
- elif entry_type == "ReprEntryNative":
- reprentry = ReprEntryNative(data["lines"])
- else:
- _report_unserialization_failure(entry_type, TestReport, reportdict)
- return reprentry
-
- def deserialize_repr_traceback(repr_traceback_dict):
- repr_traceback_dict["reprentries"] = [
- deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"]
- ]
- return ReprTraceback(**repr_traceback_dict)
-
- def deserialize_repr_crash(repr_crash_dict: Optional[Dict[str, Any]]):
- if repr_crash_dict is not None:
- return ReprFileLocation(**repr_crash_dict)
- else:
- return None
-
- if (
- reportdict["longrepr"]
- and "reprcrash" in reportdict["longrepr"]
- and "reprtraceback" in reportdict["longrepr"]
- ):
-
- reprtraceback = deserialize_repr_traceback(
- reportdict["longrepr"]["reprtraceback"]
- )
- reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"])
- if reportdict["longrepr"]["chain"]:
- chain = []
- for repr_traceback_data, repr_crash_data, description in reportdict[
- "longrepr"
- ]["chain"]:
- chain.append(
- (
- deserialize_repr_traceback(repr_traceback_data),
- deserialize_repr_crash(repr_crash_data),
- description,
- )
- )
- exception_info: Union[
- ExceptionChainRepr, ReprExceptionInfo
- ] = ExceptionChainRepr(chain)
- else:
- exception_info = ReprExceptionInfo(reprtraceback, reprcrash)
-
- for section in reportdict["longrepr"]["sections"]:
- exception_info.addsection(*section)
- reportdict["longrepr"] = exception_info
-
- return reportdict
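A minimal round-trip sketch for the serialization helpers above (illustrative; the nodeid, location and duration values are made up):

from _pytest.reports import TestReport

rep = TestReport(
    nodeid="test_mod.py::test_ok",
    location=("test_mod.py", 0, "test_ok"),
    keywords={"test_ok": 1},
    outcome="passed",
    longrepr=None,
    when="call",
    sections=[],
    duration=0.01,
)
data = rep._to_json()                    # dict of builtins, e.g. for pytest-xdist transport
restored = TestReport._from_json(data)
assert restored.passed and restored.nodeid == rep.nodeid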
diff --git a/contrib/python/pytest/py3/_pytest/runner.py b/contrib/python/pytest/py3/_pytest/runner.py
deleted file mode 100644
index df6eecdb12..0000000000
--- a/contrib/python/pytest/py3/_pytest/runner.py
+++ /dev/null
@@ -1,541 +0,0 @@
-"""Basic collect and runtest protocol implementations."""
-import bdb
-import os
-import sys
-from typing import Callable
-from typing import cast
-from typing import Dict
-from typing import Generic
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import TypeVar
-from typing import Union
-
-import attr
-
-from .reports import BaseReport
-from .reports import CollectErrorRepr
-from .reports import CollectReport
-from .reports import TestReport
-from _pytest import timing
-from _pytest._code.code import ExceptionChainRepr
-from _pytest._code.code import ExceptionInfo
-from _pytest._code.code import TerminalRepr
-from _pytest.compat import final
-from _pytest.config.argparsing import Parser
-from _pytest.deprecated import check_ispytest
-from _pytest.nodes import Collector
-from _pytest.nodes import Item
-from _pytest.nodes import Node
-from _pytest.outcomes import Exit
-from _pytest.outcomes import OutcomeException
-from _pytest.outcomes import Skipped
-from _pytest.outcomes import TEST_OUTCOME
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
- from _pytest.main import Session
- from _pytest.terminal import TerminalReporter
-
-#
-# pytest plugin hooks.
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("terminal reporting", "reporting", after="general")
- group.addoption(
- "--durations",
- action="store",
- type=int,
- default=None,
- metavar="N",
- help="show N slowest setup/test durations (N=0 for all).",
- )
- group.addoption(
- "--durations-min",
- action="store",
- type=float,
- default=0.005,
- metavar="N",
- help="Minimal duration in seconds for inclusion in slowest list. Default 0.005",
- )
-
-
-def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None:
- durations = terminalreporter.config.option.durations
- durations_min = terminalreporter.config.option.durations_min
- verbose = terminalreporter.config.getvalue("verbose")
- if durations is None:
- return
- tr = terminalreporter
- dlist = []
- for replist in tr.stats.values():
- for rep in replist:
- if hasattr(rep, "duration"):
- dlist.append(rep)
- if not dlist:
- return
- dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return]
- if not durations:
- tr.write_sep("=", "slowest durations")
- else:
- tr.write_sep("=", "slowest %s durations" % durations)
- dlist = dlist[:durations]
-
- for i, rep in enumerate(dlist):
- if verbose < 2 and rep.duration < durations_min:
- tr.write_line("")
- tr.write_line(
- "(%s durations < %gs hidden. Use -vv to show these durations.)"
- % (len(dlist) - i, durations_min)
- )
- break
- tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
-
-
-def pytest_sessionstart(session: "Session") -> None:
- session._setupstate = SetupState()
-
-
-def pytest_sessionfinish(session: "Session") -> None:
- session._setupstate.teardown_exact(None)
-
-
-def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool:
- ihook = item.ihook
- ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
- runtestprotocol(item, nextitem=nextitem)
- ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
- return True
-
-
-def runtestprotocol(
- item: Item, log: bool = True, nextitem: Optional[Item] = None
-) -> List[TestReport]:
- hasrequest = hasattr(item, "_request")
- if hasrequest and not item._request: # type: ignore[attr-defined]
- # This only happens if the item is re-run, as is done by
- # pytest-rerunfailures.
- item._initrequest() # type: ignore[attr-defined]
- rep = call_and_report(item, "setup", log)
- reports = [rep]
- if rep.passed:
- if item.config.getoption("setupshow", False):
- show_test_item(item)
- if not item.config.getoption("setuponly", False):
- reports.append(call_and_report(item, "call", log))
- reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
- # After all teardown hooks have been called
- # want funcargs and request info to go away.
- if hasrequest:
- item._request = False # type: ignore[attr-defined]
- item.funcargs = None # type: ignore[attr-defined]
- return reports
-
-
-def show_test_item(item: Item) -> None:
- """Show test function, parameters and the fixtures of the test item."""
- tw = item.config.get_terminal_writer()
- tw.line()
- tw.write(" " * 8)
- tw.write(item.nodeid)
- used_fixtures = sorted(getattr(item, "fixturenames", []))
- if used_fixtures:
- tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
- tw.flush()
-
-
-def pytest_runtest_setup(item: Item) -> None:
- _update_current_test_var(item, "setup")
- item.session._setupstate.setup(item)
-
-
-def pytest_runtest_call(item: Item) -> None:
- _update_current_test_var(item, "call")
- try:
- del sys.last_type
- del sys.last_value
- del sys.last_traceback
- except AttributeError:
- pass
- try:
- item.runtest()
- except Exception as e:
- # Store trace info to allow postmortem debugging
- sys.last_type = type(e)
- sys.last_value = e
- assert e.__traceback__ is not None
- # Skip *this* frame
- sys.last_traceback = e.__traceback__.tb_next
- raise e
-
-
-def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None:
- _update_current_test_var(item, "teardown")
- item.session._setupstate.teardown_exact(nextitem)
- _update_current_test_var(item, None)
-
-
-def _update_current_test_var(
- item: Item, when: Optional["Literal['setup', 'call', 'teardown']"]
-) -> None:
- """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
-
- If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment.
- """
- var_name = "PYTEST_CURRENT_TEST"
- if when:
- value = f"{item.nodeid} ({when})"
- # don't allow null bytes on environment variables (see #2644, #2957)
- value = value.replace("\x00", "(null)")
- os.environ[var_name] = value
- else:
- os.environ.pop(var_name)
-
-
-def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
- if report.when in ("setup", "teardown"):
- if report.failed:
- # category, shortletter, verbose-word
- return "error", "E", "ERROR"
- elif report.skipped:
- return "skipped", "s", "SKIPPED"
- else:
- return "", "", ""
- return None
-
-
-#
-# Implementation
-
-
-def call_and_report(
- item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds
-) -> TestReport:
- call = call_runtest_hook(item, when, **kwds)
- hook = item.ihook
- report: TestReport = hook.pytest_runtest_makereport(item=item, call=call)
- if log:
- hook.pytest_runtest_logreport(report=report)
- if check_interactive_exception(call, report):
- hook.pytest_exception_interact(node=item, call=call, report=report)
- return report
-
-
-def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool:
- """Check whether the call raised an exception that should be reported as
- interactive."""
- if call.excinfo is None:
- # Didn't raise.
- return False
- if hasattr(report, "wasxfail"):
- # Exception was expected.
- return False
- if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
- # Special control flow exception.
- return False
- return True
-
-
-def call_runtest_hook(
- item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds
-) -> "CallInfo[None]":
- if when == "setup":
- ihook: Callable[..., None] = item.ihook.pytest_runtest_setup
- elif when == "call":
- ihook = item.ihook.pytest_runtest_call
- elif when == "teardown":
- ihook = item.ihook.pytest_runtest_teardown
- else:
- assert False, f"Unhandled runtest hook case: {when}"
- reraise: Tuple[Type[BaseException], ...] = (Exit,)
- if not item.config.getoption("usepdb", False):
- reraise += (KeyboardInterrupt,)
- return CallInfo.from_call(
- lambda: ihook(item=item, **kwds), when=when, reraise=reraise
- )
-
-
-TResult = TypeVar("TResult", covariant=True)
-
-
-@final
-@attr.s(repr=False, init=False, auto_attribs=True)
-class CallInfo(Generic[TResult]):
- """Result/Exception info of a function invocation."""
-
- _result: Optional[TResult]
- #: The captured exception of the call, if it raised.
- excinfo: Optional[ExceptionInfo[BaseException]]
- #: The system time when the call started, in seconds since the epoch.
- start: float
- #: The system time when the call ended, in seconds since the epoch.
- stop: float
- #: The call duration, in seconds.
- duration: float
- #: The context of invocation: "collect", "setup", "call" or "teardown".
- when: "Literal['collect', 'setup', 'call', 'teardown']"
-
- def __init__(
- self,
- result: Optional[TResult],
- excinfo: Optional[ExceptionInfo[BaseException]],
- start: float,
- stop: float,
- duration: float,
- when: "Literal['collect', 'setup', 'call', 'teardown']",
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- self._result = result
- self.excinfo = excinfo
- self.start = start
- self.stop = stop
- self.duration = duration
- self.when = when
-
- @property
- def result(self) -> TResult:
- """The return value of the call, if it didn't raise.
-
- Can only be accessed if excinfo is None.
- """
- if self.excinfo is not None:
- raise AttributeError(f"{self!r} has no valid result")
- # The cast is safe because an exception wasn't raised, hence
- # _result has the expected function return type (which may be
- # None, that's why a cast and not an assert).
- return cast(TResult, self._result)
-
- @classmethod
- def from_call(
- cls,
- func: "Callable[[], TResult]",
- when: "Literal['collect', 'setup', 'call', 'teardown']",
- reraise: Optional[
- Union[Type[BaseException], Tuple[Type[BaseException], ...]]
- ] = None,
- ) -> "CallInfo[TResult]":
- """Call func, wrapping the result in a CallInfo.
-
- :param func:
- The function to call. Called without arguments.
- :param when:
- The phase in which the function is called.
- :param reraise:
- Exception or exceptions that shall propagate if raised by the
- function, instead of being wrapped in the CallInfo.
- """
- excinfo = None
- start = timing.time()
- precise_start = timing.perf_counter()
- try:
- result: Optional[TResult] = func()
- except BaseException:
- excinfo = ExceptionInfo.from_current()
- if reraise is not None and isinstance(excinfo.value, reraise):
- raise
- result = None
- # use the perf counter
- precise_stop = timing.perf_counter()
- duration = precise_stop - precise_start
- stop = timing.time()
- return cls(
- start=start,
- stop=stop,
- duration=duration,
- when=when,
- result=result,
- excinfo=excinfo,
- _ispytest=True,
- )
-
- def __repr__(self) -> str:
- if self.excinfo is None:
- return f"<CallInfo when={self.when!r} result: {self._result!r}>"
- return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
-
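A small sketch of ``CallInfo.from_call`` above, for both a successful and a raising callable:

from _pytest.runner import CallInfo

ok = CallInfo.from_call(lambda: 1 + 1, "call")
assert ok.excinfo is None and ok.result == 2

failed = CallInfo.from_call(lambda: 1 / 0, "call")
assert failed.excinfo is not None                   # exception was captured
assert failed.excinfo.type is ZeroDivisionError     # accessing .result would raise instead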
-
-def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
- return TestReport.from_item_and_call(item, call)
-
-
-def pytest_make_collect_report(collector: Collector) -> CollectReport:
- call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
- longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
- if not call.excinfo:
- outcome: Literal["passed", "skipped", "failed"] = "passed"
- else:
- skip_exceptions = [Skipped]
- unittest = sys.modules.get("unittest")
- if unittest is not None:
- # Type ignored because unittest is loaded dynamically.
- skip_exceptions.append(unittest.SkipTest) # type: ignore
- if isinstance(call.excinfo.value, tuple(skip_exceptions)):
- outcome = "skipped"
- r_ = collector._repr_failure_py(call.excinfo, "line")
- assert isinstance(r_, ExceptionChainRepr), repr(r_)
- r = r_.reprcrash
- assert r
- longrepr = (str(r.path), r.lineno, r.message)
- else:
- outcome = "failed"
- errorinfo = collector.repr_failure(call.excinfo)
- if not hasattr(errorinfo, "toterminal"):
- assert isinstance(errorinfo, str)
- errorinfo = CollectErrorRepr(errorinfo)
- longrepr = errorinfo
- result = call.result if not call.excinfo else None
- rep = CollectReport(collector.nodeid, outcome, longrepr, result)
- rep.call = call # type: ignore # see collect_one_node
- return rep
-
-
-class SetupState:
- """Shared state for setting up/tearing down test items or collectors
- in a session.
-
- Suppose we have a collection tree as follows:
-
- <Session session>
- <Module mod1>
- <Function item1>
- <Module mod2>
- <Function item2>
-
- The SetupState maintains a stack. The stack starts out empty:
-
- []
-
- During the setup phase of item1, setup(item1) is called. What it does
- is:
-
- push session to stack, run session.setup()
- push mod1 to stack, run mod1.setup()
- push item1 to stack, run item1.setup()
-
- The stack is:
-
- [session, mod1, item1]
-
- While the stack is in this shape, it is allowed to add finalizers to
- each of session, mod1, item1 using addfinalizer().
-
- During the teardown phase of item1, teardown_exact(item2) is called,
- where item2 is the next item to item1. What it does is:
-
- pop item1 from stack, run its teardowns
- pop mod1 from stack, run its teardowns
-
- mod1 was popped because it ended its purpose with item1. The stack is:
-
- [session]
-
- During the setup phase of item2, setup(item2) is called. What it does
- is:
-
- push mod2 to stack, run mod2.setup()
- push item2 to stack, run item2.setup()
-
- Stack:
-
- [session, mod2, item2]
-
- During the teardown phase of item2, teardown_exact(None) is called,
- because item2 is the last item. What it does is:
-
- pop item2 from stack, run its teardowns
- pop mod2 from stack, run its teardowns
- pop session from stack, run its teardowns
-
- Stack:
-
- []
-
- The end!
- """
-
- def __init__(self) -> None:
- # The stack is in the dict insertion order.
- self.stack: Dict[
- Node,
- Tuple[
- # Node's finalizers.
- List[Callable[[], object]],
- # Node's exception, if its setup raised.
- Optional[Union[OutcomeException, Exception]],
- ],
- ] = {}
-
- def setup(self, item: Item) -> None:
- """Setup objects along the collector chain to the item."""
- needed_collectors = item.listchain()
-
- # If a collector fails its setup, fail its entire subtree of items.
- # The setup is not retried for each item - the same exception is used.
- for col, (finalizers, exc) in self.stack.items():
- assert col in needed_collectors, "previous item was not torn down properly"
- if exc:
- raise exc
-
- for col in needed_collectors[len(self.stack) :]:
- assert col not in self.stack
- # Push onto the stack.
- self.stack[col] = ([col.teardown], None)
- try:
- col.setup()
- except TEST_OUTCOME as exc:
- self.stack[col] = (self.stack[col][0], exc)
- raise exc
-
- def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None:
- """Attach a finalizer to the given node.
-
- The node must be currently active in the stack.
- """
- assert node and not isinstance(node, tuple)
- assert callable(finalizer)
- assert node in self.stack, (node, self.stack)
- self.stack[node][0].append(finalizer)
-
- def teardown_exact(self, nextitem: Optional[Item]) -> None:
- """Teardown the current stack up until reaching nodes that nextitem
- also descends from.
-
- When nextitem is None (meaning we're at the last item), the entire
- stack is torn down.
- """
- needed_collectors = nextitem and nextitem.listchain() or []
- exc = None
- while self.stack:
- if list(self.stack.keys()) == needed_collectors[: len(self.stack)]:
- break
- node, (finalizers, _) = self.stack.popitem()
- while finalizers:
- fin = finalizers.pop()
- try:
- fin()
- except TEST_OUTCOME as e:
- # XXX Only first exception will be seen by user,
- # ideally all should be reported.
- if exc is None:
- exc = e
- if exc:
- raise exc
- if nextitem is None:
- assert not self.stack
-
-
-def collect_one_node(collector: Collector) -> CollectReport:
- ihook = collector.ihook
- ihook.pytest_collectstart(collector=collector)
- rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
- call = rep.__dict__.pop("call", None)
- if call and check_interactive_exception(call, rep):
- ihook.pytest_exception_interact(node=collector, call=call, report=rep)
- return rep
diff --git a/contrib/python/pytest/py3/_pytest/scope.py b/contrib/python/pytest/py3/_pytest/scope.py
deleted file mode 100644
index 7a746fb9fa..0000000000
--- a/contrib/python/pytest/py3/_pytest/scope.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""
-Scope definition and related utilities.
-
-Those are defined here, instead of in the 'fixtures' module because
-their use is spread across many other pytest modules, and centralizing it in 'fixtures'
-would cause circular references.
-
-Also this makes the module light to import, as it should.
-"""
-from enum import Enum
-from functools import total_ordering
-from typing import Optional
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
- _ScopeName = Literal["session", "package", "module", "class", "function"]
-
-
-@total_ordering
-class Scope(Enum):
- """
- Represents one of the possible fixture scopes in pytest.
-
- Scopes are ordered from lower to higher, that is:
-
- ->>> higher ->>>
-
- Function < Class < Module < Package < Session
-
- <<<- lower <<<-
- """
-
- # Scopes need to be listed from lower to higher.
- Function: "_ScopeName" = "function"
- Class: "_ScopeName" = "class"
- Module: "_ScopeName" = "module"
- Package: "_ScopeName" = "package"
- Session: "_ScopeName" = "session"
-
- def next_lower(self) -> "Scope":
- """Return the next lower scope."""
- index = _SCOPE_INDICES[self]
- if index == 0:
- raise ValueError(f"{self} is the lower-most scope")
- return _ALL_SCOPES[index - 1]
-
- def next_higher(self) -> "Scope":
- """Return the next higher scope."""
- index = _SCOPE_INDICES[self]
- if index == len(_SCOPE_INDICES) - 1:
- raise ValueError(f"{self} is the upper-most scope")
- return _ALL_SCOPES[index + 1]
-
- def __lt__(self, other: "Scope") -> bool:
- self_index = _SCOPE_INDICES[self]
- other_index = _SCOPE_INDICES[other]
- return self_index < other_index
-
- @classmethod
- def from_user(
- cls, scope_name: "_ScopeName", descr: str, where: Optional[str] = None
- ) -> "Scope":
- """
- Given a scope name from the user, return the equivalent Scope enum. Should be used
- whenever we want to convert a user provided scope name to its enum object.
-
- If the scope name is invalid, construct a user friendly message and call pytest.fail.
- """
- from _pytest.outcomes import fail
-
- try:
- # Holding this reference is necessary for mypy at the moment.
- scope = Scope(scope_name)
- except ValueError:
- fail(
- "{} {}got an unexpected scope value '{}'".format(
- descr, f"from {where} " if where else "", scope_name
- ),
- pytrace=False,
- )
- return scope
-
-
-_ALL_SCOPES = list(Scope)
-_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)}
-
-
-# Ordered list of scopes which can contain many tests (in practice all except Function).
-HIGH_SCOPES = [x for x in Scope if x is not Scope.Function]
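A short sketch of the ordering defined by ``Scope`` above:

from _pytest.scope import HIGH_SCOPES, Scope

assert Scope.Function < Scope.Session                 # ordered via _SCOPE_INDICES
assert Scope.Module.next_higher() is Scope.Package
assert Scope.from_user("class", "fixture scope") is Scope.Class
assert Scope.Function not in HIGH_SCOPES              # Function cannot contain other tests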
diff --git a/contrib/python/pytest/py3/_pytest/setuponly.py b/contrib/python/pytest/py3/_pytest/setuponly.py
deleted file mode 100644
index 531131ce72..0000000000
--- a/contrib/python/pytest/py3/_pytest/setuponly.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from typing import Generator
-from typing import Optional
-from typing import Union
-
-import pytest
-from _pytest._io.saferepr import saferepr
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureDef
-from _pytest.fixtures import SubRequest
-from _pytest.scope import Scope
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("debugconfig")
- group.addoption(
- "--setuponly",
- "--setup-only",
- action="store_true",
- help="only setup fixtures, do not execute tests.",
- )
- group.addoption(
- "--setupshow",
- "--setup-show",
- action="store_true",
- help="show setup of fixtures while executing tests.",
- )
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_setup(
- fixturedef: FixtureDef[object], request: SubRequest
-) -> Generator[None, None, None]:
- yield
- if request.config.option.setupshow:
- if hasattr(request, "param"):
- # Save the fixture parameter so ._show_fixture_action() can
- # display it now and during the teardown (in .finish()).
- if fixturedef.ids:
- if callable(fixturedef.ids):
- param = fixturedef.ids(request.param)
- else:
- param = fixturedef.ids[request.param_index]
- else:
- param = request.param
- fixturedef.cached_param = param # type: ignore[attr-defined]
- _show_fixture_action(fixturedef, "SETUP")
-
-
-def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None:
- if fixturedef.cached_result is not None:
- config = fixturedef._fixturemanager.config
- if config.option.setupshow:
- _show_fixture_action(fixturedef, "TEARDOWN")
- if hasattr(fixturedef, "cached_param"):
- del fixturedef.cached_param # type: ignore[attr-defined]
-
-
-def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None:
- config = fixturedef._fixturemanager.config
- capman = config.pluginmanager.getplugin("capturemanager")
- if capman:
- capman.suspend_global_capture()
-
- tw = config.get_terminal_writer()
- tw.line()
- # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc.
- scope_indent = list(reversed(Scope)).index(fixturedef._scope)
- tw.write(" " * 2 * scope_indent)
- tw.write(
- "{step} {scope} {fixture}".format(
- step=msg.ljust(8), # align the output to TEARDOWN
- scope=fixturedef.scope[0].upper(),
- fixture=fixturedef.argname,
- )
- )
-
- if msg == "SETUP":
- deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
- if deps:
- tw.write(" (fixtures used: {})".format(", ".join(deps)))
-
- if hasattr(fixturedef, "cached_param"):
- tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") # type: ignore[attr-defined]
-
- tw.flush()
-
- if capman:
- capman.resume_global_capture()
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- if config.option.setuponly:
- config.option.setupshow = True
- return None
diff --git a/contrib/python/pytest/py3/_pytest/setupplan.py b/contrib/python/pytest/py3/_pytest/setupplan.py
deleted file mode 100644
index 9ba81ccaf0..0000000000
--- a/contrib/python/pytest/py3/_pytest/setupplan.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from typing import Optional
-from typing import Union
-
-import pytest
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureDef
-from _pytest.fixtures import SubRequest
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("debugconfig")
- group.addoption(
- "--setupplan",
- "--setup-plan",
- action="store_true",
- help="show what fixtures and tests would be executed but "
- "don't execute anything.",
- )
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_fixture_setup(
- fixturedef: FixtureDef[object], request: SubRequest
-) -> Optional[object]:
- # Will return a dummy fixture if the setuponly option is provided.
- if request.config.option.setupplan:
- my_cache_key = fixturedef.cache_key(request)
- fixturedef.cached_result = (None, my_cache_key, None)
- return fixturedef.cached_result
- return None
-
-
-@pytest.hookimpl(tryfirst=True)
-def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
- if config.option.setupplan:
- config.option.setuponly = True
- config.option.setupshow = True
- return None
diff --git a/contrib/python/pytest/py3/_pytest/skipping.py b/contrib/python/pytest/py3/_pytest/skipping.py
deleted file mode 100644
index ac7216f838..0000000000
--- a/contrib/python/pytest/py3/_pytest/skipping.py
+++ /dev/null
@@ -1,296 +0,0 @@
-"""Support for skip/xfail functions and markers."""
-import os
-import platform
-import sys
-import traceback
-from collections.abc import Mapping
-from typing import Generator
-from typing import Optional
-from typing import Tuple
-from typing import Type
-
-import attr
-
-from _pytest.config import Config
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.mark.structures import Mark
-from _pytest.nodes import Item
-from _pytest.outcomes import fail
-from _pytest.outcomes import skip
-from _pytest.outcomes import xfail
-from _pytest.reports import BaseReport
-from _pytest.runner import CallInfo
-from _pytest.stash import StashKey
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group.addoption(
- "--runxfail",
- action="store_true",
- dest="runxfail",
- default=False,
- help="report the results of xfail tests as if they were not marked",
- )
-
- parser.addini(
- "xfail_strict",
- "default for the strict parameter of xfail "
- "markers when not given explicitly (default: False)",
- default=False,
- type="bool",
- )
-
-
-def pytest_configure(config: Config) -> None:
- if config.option.runxfail:
- # yay a hack
- import pytest
-
- old = pytest.xfail
- config.add_cleanup(lambda: setattr(pytest, "xfail", old))
-
- def nop(*args, **kwargs):
- pass
-
- nop.Exception = xfail.Exception # type: ignore[attr-defined]
- setattr(pytest, "xfail", nop)
-
- config.addinivalue_line(
- "markers",
- "skip(reason=None): skip the given test function with an optional reason. "
- 'Example: skip(reason="no way of currently testing this") skips the '
- "test.",
- )
- config.addinivalue_line(
- "markers",
- "skipif(condition, ..., *, reason=...): "
- "skip the given test function if any of the conditions evaluate to True. "
- "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
- "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
- )
- config.addinivalue_line(
- "markers",
- "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
- "mark the test function as an expected failure if any of the conditions "
- "evaluate to True. Optionally specify a reason for better reporting "
- "and run=False if you don't even want to execute the test function. "
- "If only specific exception(s) are expected, you can list them in "
- "raises, and if the test fails in other ways, it will be reported as "
- "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
- )
-
-
-def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
- """Evaluate a single skipif/xfail condition.
-
- If an old-style string condition is given, it is eval()'d, otherwise the
- condition is bool()'d. If this fails, an appropriately formatted pytest.fail
- is raised.
-
- Returns (result, reason). The reason is only relevant if the result is True.
- """
- # String condition.
- if isinstance(condition, str):
- globals_ = {
- "os": os,
- "sys": sys,
- "platform": platform,
- "config": item.config,
- }
- for dictionary in reversed(
- item.ihook.pytest_markeval_namespace(config=item.config)
- ):
- if not isinstance(dictionary, Mapping):
- raise ValueError(
- "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
- dictionary
- )
- )
- globals_.update(dictionary)
- if hasattr(item, "obj"):
- globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
- try:
- filename = f"<{mark.name} condition>"
- condition_code = compile(condition, filename, "eval")
- result = eval(condition_code, globals_)
- except SyntaxError as exc:
- msglines = [
- "Error evaluating %r condition" % mark.name,
- " " + condition,
- " " + " " * (exc.offset or 0) + "^",
- "SyntaxError: invalid syntax",
- ]
- fail("\n".join(msglines), pytrace=False)
- except Exception as exc:
- msglines = [
- "Error evaluating %r condition" % mark.name,
- " " + condition,
- *traceback.format_exception_only(type(exc), exc),
- ]
- fail("\n".join(msglines), pytrace=False)
-
- # Boolean condition.
- else:
- try:
- result = bool(condition)
- except Exception as exc:
- msglines = [
- "Error evaluating %r condition as a boolean" % mark.name,
- *traceback.format_exception_only(type(exc), exc),
- ]
- fail("\n".join(msglines), pytrace=False)
-
- reason = mark.kwargs.get("reason", None)
- if reason is None:
- if isinstance(condition, str):
- reason = "condition: " + condition
- else:
- # XXX better be checked at collection time
- msg = (
- "Error evaluating %r: " % mark.name
- + "you need to specify reason=STRING when using booleans as conditions."
- )
- fail(msg, pytrace=False)
-
- return result, reason
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class Skip:
- """The result of evaluate_skip_marks()."""
-
- reason: str = "unconditional skip"
-
-
-def evaluate_skip_marks(item: Item) -> Optional[Skip]:
- """Evaluate skip and skipif marks on item, returning Skip if triggered."""
- for mark in item.iter_markers(name="skipif"):
- if "condition" not in mark.kwargs:
- conditions = mark.args
- else:
- conditions = (mark.kwargs["condition"],)
-
- # Unconditional.
- if not conditions:
- reason = mark.kwargs.get("reason", "")
- return Skip(reason)
-
- # If any of the conditions are true.
- for condition in conditions:
- result, reason = evaluate_condition(item, mark, condition)
- if result:
- return Skip(reason)
-
- for mark in item.iter_markers(name="skip"):
- try:
- return Skip(*mark.args, **mark.kwargs)
- except TypeError as e:
- raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
-
- return None
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class Xfail:
- """The result of evaluate_xfail_marks()."""
-
- reason: str
- run: bool
- strict: bool
- raises: Optional[Tuple[Type[BaseException], ...]]
-
-
-def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
- """Evaluate xfail marks on item, returning Xfail if triggered."""
- for mark in item.iter_markers(name="xfail"):
- run = mark.kwargs.get("run", True)
- strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
- raises = mark.kwargs.get("raises", None)
- if "condition" not in mark.kwargs:
- conditions = mark.args
- else:
- conditions = (mark.kwargs["condition"],)
-
- # Unconditional.
- if not conditions:
- reason = mark.kwargs.get("reason", "")
- return Xfail(reason, run, strict, raises)
-
- # If any of the conditions are true.
- for condition in conditions:
- result, reason = evaluate_condition(item, mark, condition)
- if result:
- return Xfail(reason, run, strict, raises)
-
- return None
-
-
-# Saves the xfail mark evaluation. Can be refreshed during call if None.
-xfailed_key = StashKey[Optional[Xfail]]()
-
-
-@hookimpl(tryfirst=True)
-def pytest_runtest_setup(item: Item) -> None:
- skipped = evaluate_skip_marks(item)
- if skipped:
- raise skip.Exception(skipped.reason, _use_item_location=True)
-
- item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
- if xfailed and not item.config.option.runxfail and not xfailed.run:
- xfail("[NOTRUN] " + xfailed.reason)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
- xfailed = item.stash.get(xfailed_key, None)
- if xfailed is None:
- item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
-
- if xfailed and not item.config.option.runxfail and not xfailed.run:
- xfail("[NOTRUN] " + xfailed.reason)
-
- yield
-
- # The test run may have added an xfail mark dynamically.
- xfailed = item.stash.get(xfailed_key, None)
- if xfailed is None:
- item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
- outcome = yield
- rep = outcome.get_result()
- xfailed = item.stash.get(xfailed_key, None)
- if item.config.option.runxfail:
- pass # don't interfere
- elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
- assert call.excinfo.value.msg is not None
- rep.wasxfail = "reason: " + call.excinfo.value.msg
- rep.outcome = "skipped"
- elif not rep.skipped and xfailed:
- if call.excinfo:
- raises = xfailed.raises
- if raises is not None and not isinstance(call.excinfo.value, raises):
- rep.outcome = "failed"
- else:
- rep.outcome = "skipped"
- rep.wasxfail = xfailed.reason
- elif call.when == "call":
- if xfailed.strict:
- rep.outcome = "failed"
- rep.longrepr = "[XPASS(strict)] " + xfailed.reason
- else:
- rep.outcome = "passed"
- rep.wasxfail = xfailed.reason
-
-
-def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
- if hasattr(report, "wasxfail"):
- if report.skipped:
- return "xfailed", "x", "XFAIL"
- elif report.passed:
- return "xpassed", "X", "XPASS"
- return None
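For context, the skip/skipif/xfail marks evaluated by the module above are applied in test code through the standard pytest marker API. A minimal sketch (module and test names are illustrative, not taken from this tree):

    import sys

    import pytest


    @pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX shell")
    def test_posix_only():
        assert True


    @pytest.mark.xfail(raises=NotImplementedError, strict=True, reason="not implemented yet")
    def test_known_gap():
        raise NotImplementedError

With strict=True, an unexpected pass is turned into a failure, which is exactly the "[XPASS(strict)]" branch handled by pytest_runtest_makereport above.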
diff --git a/contrib/python/pytest/py3/_pytest/stash.py b/contrib/python/pytest/py3/_pytest/stash.py
deleted file mode 100644
index e61d75b95f..0000000000
--- a/contrib/python/pytest/py3/_pytest/stash.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from typing import Any
-from typing import cast
-from typing import Dict
-from typing import Generic
-from typing import TypeVar
-from typing import Union
-
-
-__all__ = ["Stash", "StashKey"]
-
-
-T = TypeVar("T")
-D = TypeVar("D")
-
-
-class StashKey(Generic[T]):
- """``StashKey`` is an object used as a key to a :class:`Stash`.
-
- A ``StashKey`` is associated with the type ``T`` of the value of the key.
-
- A ``StashKey`` is unique and cannot conflict with another key.
- """
-
- __slots__ = ()
-
-
-class Stash:
- r"""``Stash`` is a type-safe heterogeneous mutable mapping that
- allows keys and value types to be defined separately from
- where it (the ``Stash``) is created.
-
- Usually you will be given an object which has a ``Stash``, for example
- :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`:
-
- .. code-block:: python
-
- stash: Stash = some_object.stash
-
- If a module or plugin wants to store data in this ``Stash``, it creates
- :class:`StashKey`\s for its keys (at the module level):
-
- .. code-block:: python
-
- # At the top-level of the module
- some_str_key = StashKey[str]()
- some_bool_key = StashKey[bool]()
-
- To store information:
-
- .. code-block:: python
-
- # Value type must match the key.
- stash[some_str_key] = "value"
- stash[some_bool_key] = True
-
- To retrieve the information:
-
- .. code-block:: python
-
- # The static type of some_str is str.
- some_str = stash[some_str_key]
- # The static type of some_bool is bool.
- some_bool = stash[some_bool_key]
- """
-
- __slots__ = ("_storage",)
-
- def __init__(self) -> None:
- self._storage: Dict[StashKey[Any], object] = {}
-
- def __setitem__(self, key: StashKey[T], value: T) -> None:
- """Set a value for key."""
- self._storage[key] = value
-
- def __getitem__(self, key: StashKey[T]) -> T:
- """Get the value for key.
-
- Raises ``KeyError`` if the key wasn't set before.
- """
- return cast(T, self._storage[key])
-
- def get(self, key: StashKey[T], default: D) -> Union[T, D]:
- """Get the value for key, or return default if the key wasn't set
- before."""
- try:
- return self[key]
- except KeyError:
- return default
-
- def setdefault(self, key: StashKey[T], default: T) -> T:
- """Return the value of key if already set, otherwise set the value
- of key to default and return default."""
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
-
- def __delitem__(self, key: StashKey[T]) -> None:
- """Delete the value for key.
-
- Raises ``KeyError`` if the key wasn't set before.
- """
- del self._storage[key]
-
- def __contains__(self, key: StashKey[T]) -> bool:
- """Return whether key was set."""
- return key in self._storage
-
- def __len__(self) -> int:
- """Return how many items exist in the stash."""
- return len(self._storage)
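A minimal sketch of the plugin-side pattern the docstring above describes, using the public pytest.StashKey alias of this class; the key name and the timing logic are purely illustrative:

    # conftest.py
    import time

    import pytest

    # Keys are created once, at module level, as the docstring recommends.
    start_time_key = pytest.StashKey[float]()


    def pytest_runtest_setup(item):
        # Attach typed data to the node; later hooks can read it back safely.
        item.stash[start_time_key] = time.time()


    def pytest_runtest_teardown(item):
        started = item.stash.get(start_time_key, None)
        if started is not None:
            print(f"{item.nodeid}: {time.time() - started:.3f}s since setup")

The same pattern appears in skipping.py above, where xfailed_key = StashKey[Optional[Xfail]]() stores the xfail evaluation on each item.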
diff --git a/contrib/python/pytest/py3/_pytest/stepwise.py b/contrib/python/pytest/py3/_pytest/stepwise.py
deleted file mode 100644
index 4d95a96b87..0000000000
--- a/contrib/python/pytest/py3/_pytest/stepwise.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from typing import List
-from typing import Optional
-from typing import TYPE_CHECKING
-
-import pytest
-from _pytest import nodes
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.main import Session
-from _pytest.reports import TestReport
-
-if TYPE_CHECKING:
- from _pytest.cacheprovider import Cache
-
-STEPWISE_CACHE_DIR = "cache/stepwise"
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("general")
- group.addoption(
- "--sw",
- "--stepwise",
- action="store_true",
- default=False,
- dest="stepwise",
- help="exit on test failure and continue from last failing test next time",
- )
- group.addoption(
- "--sw-skip",
- "--stepwise-skip",
- action="store_true",
- default=False,
- dest="stepwise_skip",
- help="ignore the first failing test but stop on the next failing test.\n"
- "implicitly enables --stepwise.",
- )
-
-
-@pytest.hookimpl
-def pytest_configure(config: Config) -> None:
- if config.option.stepwise_skip:
-        # Allow --stepwise-skip to work on its own merits.
- config.option.stepwise = True
- if config.getoption("stepwise"):
- config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
-
-
-def pytest_sessionfinish(session: Session) -> None:
- if not session.config.getoption("stepwise"):
- assert session.config.cache is not None
- # Clear the list of failing tests if the plugin is not active.
- session.config.cache.set(STEPWISE_CACHE_DIR, [])
-
-
-class StepwisePlugin:
- def __init__(self, config: Config) -> None:
- self.config = config
- self.session: Optional[Session] = None
- self.report_status = ""
- assert config.cache is not None
- self.cache: Cache = config.cache
- self.lastfailed: Optional[str] = self.cache.get(STEPWISE_CACHE_DIR, None)
- self.skip: bool = config.getoption("stepwise_skip")
-
- def pytest_sessionstart(self, session: Session) -> None:
- self.session = session
-
- def pytest_collection_modifyitems(
- self, config: Config, items: List[nodes.Item]
- ) -> None:
- if not self.lastfailed:
- self.report_status = "no previously failed tests, not skipping."
- return
-
- # check all item nodes until we find a match on last failed
- failed_index = None
- for index, item in enumerate(items):
- if item.nodeid == self.lastfailed:
- failed_index = index
- break
-
- # If the previously failed test was not found among the test items,
- # do not skip any tests.
- if failed_index is None:
- self.report_status = "previously failed test not found, not skipping."
- else:
- self.report_status = f"skipping {failed_index} already passed items."
- deselected = items[:failed_index]
- del items[:failed_index]
- config.hook.pytest_deselected(items=deselected)
-
- def pytest_runtest_logreport(self, report: TestReport) -> None:
- if report.failed:
- if self.skip:
- # Remove test from the failed ones (if it exists) and unset the skip option
- # to make sure the following tests will not be skipped.
- if report.nodeid == self.lastfailed:
- self.lastfailed = None
-
- self.skip = False
- else:
- # Mark test as the last failing and interrupt the test session.
- self.lastfailed = report.nodeid
- assert self.session is not None
- self.session.shouldstop = (
- "Test failed, continuing from this test next run."
- )
-
- else:
- # If the test was actually run and did pass.
- if report.when == "call":
- # Remove test from the failed ones, if exists.
- if report.nodeid == self.lastfailed:
- self.lastfailed = None
-
- def pytest_report_collectionfinish(self) -> Optional[str]:
- if self.config.getoption("verbose") >= 0 and self.report_status:
- return f"stepwise: {self.report_status}"
- return None
-
- def pytest_sessionfinish(self) -> None:
- self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed)
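The plugin above is driven entirely by the two flags it registers; a minimal sketch of invoking them programmatically (the tests/ path is hypothetical):

    import pytest

    # Stop at the first failing test; the next run resumes from that test.
    pytest.main(["--stepwise", "tests/"])

    # Ignore the first failure but stop at the next one; this implies --stepwise,
    # since pytest_configure above sets config.option.stepwise for this case.
    pytest.main(["--stepwise-skip", "tests/"])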
diff --git a/contrib/python/pytest/py3/_pytest/terminal.py b/contrib/python/pytest/py3/_pytest/terminal.py
deleted file mode 100644
index b4848c48ab..0000000000
--- a/contrib/python/pytest/py3/_pytest/terminal.py
+++ /dev/null
@@ -1,1400 +0,0 @@
-"""Terminal reporting of the full testing process.
-
-This is a good source for looking at the various reporting hooks.
-"""
-import argparse
-import datetime
-import inspect
-import platform
-import sys
-import warnings
-from collections import Counter
-from functools import partial
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import ClassVar
-from typing import Dict
-from typing import Generator
-from typing import List
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-from typing import Set
-from typing import TextIO
-from typing import Tuple
-from typing import TYPE_CHECKING
-from typing import Union
-
-import attr
-import pluggy
-
-import _pytest._version
-from _pytest import nodes
-from _pytest import timing
-from _pytest._code import ExceptionInfo
-from _pytest._code.code import ExceptionRepr
-from _pytest._io.wcwidth import wcswidth
-from _pytest.compat import final
-from _pytest.config import _PluggyPlugin
-from _pytest.config import Config
-from _pytest.config import ExitCode
-from _pytest.config import hookimpl
-from _pytest.config.argparsing import Parser
-from _pytest.nodes import Item
-from _pytest.nodes import Node
-from _pytest.pathlib import absolutepath
-from _pytest.pathlib import bestrelpath
-from _pytest.reports import BaseReport
-from _pytest.reports import CollectReport
-from _pytest.reports import TestReport
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
- from _pytest.main import Session
-
-
-REPORT_COLLECTING_RESOLUTION = 0.5
-
-KNOWN_TYPES = (
- "failed",
- "passed",
- "skipped",
- "deselected",
- "xfailed",
- "xpassed",
- "warnings",
- "error",
-)
-
-_REPORTCHARS_DEFAULT = "fE"
-
-
-class MoreQuietAction(argparse.Action):
- """A modified copy of the argparse count action which counts down and updates
- the legacy quiet attribute at the same time.
-
- Used to unify verbosity handling.
- """
-
- def __init__(
- self,
- option_strings: Sequence[str],
- dest: str,
- default: object = None,
- required: bool = False,
- help: Optional[str] = None,
- ) -> None:
- super().__init__(
- option_strings=option_strings,
- dest=dest,
- nargs=0,
- default=default,
- required=required,
- help=help,
- )
-
- def __call__(
- self,
- parser: argparse.ArgumentParser,
- namespace: argparse.Namespace,
- values: Union[str, Sequence[object], None],
- option_string: Optional[str] = None,
- ) -> None:
- new_count = getattr(namespace, self.dest, 0) - 1
- setattr(namespace, self.dest, new_count)
- # todo Deprecate config.quiet
- namespace.quiet = getattr(namespace, "quiet", 0) + 1
-
-
-def pytest_addoption(parser: Parser) -> None:
- group = parser.getgroup("terminal reporting", "reporting", after="general")
- group._addoption(
- "-v",
- "--verbose",
- action="count",
- default=0,
- dest="verbose",
- help="increase verbosity.",
- )
- group._addoption(
- "--no-header",
- action="store_true",
- default=False,
- dest="no_header",
- help="disable header",
- )
- group._addoption(
- "--no-summary",
- action="store_true",
- default=False,
- dest="no_summary",
- help="disable summary",
- )
- group._addoption(
- "-q",
- "--quiet",
- action=MoreQuietAction,
- default=0,
- dest="verbose",
- help="decrease verbosity.",
- )
- group._addoption(
- "--verbosity",
- dest="verbose",
- type=int,
- default=0,
- help="set verbosity. Default is 0.",
- )
- group._addoption(
- "-r",
- action="store",
- dest="reportchars",
- default=_REPORTCHARS_DEFAULT,
- metavar="chars",
- help="show extra test summary info as specified by chars: (f)ailed, "
- "(E)rror, (s)kipped, (x)failed, (X)passed, "
- "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
- "(w)arnings are enabled by default (see --disable-warnings), "
- "'N' can be used to reset the list. (default: 'fE').",
- )
- group._addoption(
- "--disable-warnings",
- "--disable-pytest-warnings",
- default=False,
- dest="disable_warnings",
- action="store_true",
- help="disable warnings summary",
- )
- group._addoption(
- "-l",
- "--showlocals",
- action="store_true",
- dest="showlocals",
- default=False,
- help="show locals in tracebacks (disabled by default).",
- )
- group._addoption(
- "--tb",
- metavar="style",
- action="store",
- dest="tbstyle",
- default="auto",
- choices=["auto", "long", "short", "no", "line", "native"],
- help="traceback print mode (auto/long/short/line/native/no).",
- )
- group._addoption(
- "--show-capture",
- action="store",
- dest="showcapture",
- choices=["no", "stdout", "stderr", "log", "all"],
- default="all",
- help="Controls how captured stdout/stderr/log is shown on failed tests. "
- "Default is 'all'.",
- )
- group._addoption(
- "--fulltrace",
- "--full-trace",
- action="store_true",
- default=False,
- help="don't cut any tracebacks (default is to cut).",
- )
- group._addoption(
- "--color",
- metavar="color",
- action="store",
- dest="color",
- default="auto",
- choices=["yes", "no", "auto"],
- help="color terminal output (yes/no/auto).",
- )
- group._addoption(
- "--code-highlight",
- default="yes",
- choices=["yes", "no"],
- help="Whether code should be highlighted (only if --color is also enabled)",
- )
-
- parser.addini(
- "console_output_style",
- help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
- default="progress",
- )
-
-
-def pytest_configure(config: Config) -> None:
- reporter = TerminalReporter(config, sys.stdout)
- config.pluginmanager.register(reporter, "terminalreporter")
- if config.option.debug or config.option.traceconfig:
-
- def mywriter(tags, args):
- msg = " ".join(map(str, args))
- reporter.write_line("[traceconfig] " + msg)
-
- config.trace.root.setprocessor("pytest:config", mywriter)
-
-
-def getreportopt(config: Config) -> str:
- reportchars: str = config.option.reportchars
-
- old_aliases = {"F", "S"}
- reportopts = ""
- for char in reportchars:
- if char in old_aliases:
- char = char.lower()
- if char == "a":
- reportopts = "sxXEf"
- elif char == "A":
- reportopts = "PpsxXEf"
- elif char == "N":
- reportopts = ""
- elif char not in reportopts:
- reportopts += char
-
- if not config.option.disable_warnings and "w" not in reportopts:
- reportopts = "w" + reportopts
- elif config.option.disable_warnings and "w" in reportopts:
- reportopts = reportopts.replace("w", "")
-
- return reportopts
-
-
-@hookimpl(trylast=True) # after _pytest.runner
-def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
- letter = "F"
- if report.passed:
- letter = "."
- elif report.skipped:
- letter = "s"
-
- outcome: str = report.outcome
- if report.when in ("collect", "setup", "teardown") and outcome == "failed":
- outcome = "error"
- letter = "E"
-
- return outcome, letter, outcome.upper()
-
-
-@attr.s(auto_attribs=True)
-class WarningReport:
- """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
-
- :ivar str message:
-        User-friendly message about the warning.
- :ivar str|None nodeid:
- nodeid that generated the warning (see ``get_location``).
- :ivar tuple fslocation:
- File system location of the source of the warning (see ``get_location``).
- """
-
- message: str
- nodeid: Optional[str] = None
- fslocation: Optional[Tuple[str, int]] = None
-
- count_towards_summary: ClassVar = True
-
- def get_location(self, config: Config) -> Optional[str]:
- """Return the more user-friendly information about the location of a warning, or None."""
- if self.nodeid:
- return self.nodeid
- if self.fslocation:
- filename, linenum = self.fslocation
- relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
- return f"{relpath}:{linenum}"
- return None
-
-
-@final
-class TerminalReporter:
- def __init__(self, config: Config, file: Optional[TextIO] = None) -> None:
- import _pytest.config
-
- self.config = config
- self._numcollected = 0
- self._session: Optional[Session] = None
- self._showfspath: Optional[bool] = None
-
- self.stats: Dict[str, List[Any]] = {}
- self._main_color: Optional[str] = None
- self._known_types: Optional[List[str]] = None
- self.startpath = config.invocation_params.dir
- if file is None:
- file = sys.stdout
- self._tw = _pytest.config.create_terminal_writer(config, file)
- self._screen_width = self._tw.fullwidth
- self.currentfspath: Union[None, Path, str, int] = None
- self.reportchars = getreportopt(config)
- self.hasmarkup = self._tw.hasmarkup
- self.isatty = file.isatty()
- self._progress_nodeids_reported: Set[str] = set()
- self._show_progress_info = self._determine_show_progress_info()
- self._collect_report_last_write: Optional[float] = None
- self._already_displayed_warnings: Optional[int] = None
- self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None
-
- def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
- """Return whether we should display progress information based on the current config."""
- # do not show progress if we are not capturing output (#3038)
- if self.config.getoption("capture", "no") == "no":
- return False
- # do not show progress if we are showing fixture setup/teardown
- if self.config.getoption("setupshow", False):
- return False
- cfg: str = self.config.getini("console_output_style")
- if cfg == "progress":
- return "progress"
- elif cfg == "count":
- return "count"
- else:
- return False
-
- @property
- def verbosity(self) -> int:
- verbosity: int = self.config.option.verbose
- return verbosity
-
- @property
- def showheader(self) -> bool:
- return self.verbosity >= 0
-
- @property
- def no_header(self) -> bool:
- return bool(self.config.option.no_header)
-
- @property
- def no_summary(self) -> bool:
- return bool(self.config.option.no_summary)
-
- @property
- def showfspath(self) -> bool:
- if self._showfspath is None:
- return self.verbosity >= 0
- return self._showfspath
-
- @showfspath.setter
- def showfspath(self, value: Optional[bool]) -> None:
- self._showfspath = value
-
- @property
- def showlongtestinfo(self) -> bool:
- return self.verbosity > 0
-
- def hasopt(self, char: str) -> bool:
- char = {"xfailed": "x", "skipped": "s"}.get(char, char)
- return char in self.reportchars
-
- def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None:
- fspath = self.config.rootpath / nodeid.split("::")[0]
- if self.currentfspath is None or fspath != self.currentfspath:
- if self.currentfspath is not None and self._show_progress_info:
- self._write_progress_information_filling_space()
- self.currentfspath = fspath
- relfspath = bestrelpath(self.startpath, fspath)
- self._tw.line()
- self._tw.write(relfspath + " ")
- self._tw.write(res, flush=True, **markup)
-
- def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
- if self.currentfspath != prefix:
- self._tw.line()
- self.currentfspath = prefix
- self._tw.write(prefix)
- if extra:
- self._tw.write(extra, **kwargs)
- self.currentfspath = -2
-
- def ensure_newline(self) -> None:
- if self.currentfspath:
- self._tw.line()
- self.currentfspath = None
-
- def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
- self._tw.write(content, flush=flush, **markup)
-
- def flush(self) -> None:
- self._tw.flush()
-
- def write_line(self, line: Union[str, bytes], **markup: bool) -> None:
- if not isinstance(line, str):
- line = str(line, errors="replace")
- self.ensure_newline()
- self._tw.line(line, **markup)
-
- def rewrite(self, line: str, **markup: bool) -> None:
- """Rewinds the terminal cursor to the beginning and writes the given line.
-
- :param erase:
- If True, will also add spaces until the full terminal width to ensure
- previous lines are properly erased.
-
- The rest of the keyword arguments are markup instructions.
- """
- erase = markup.pop("erase", False)
- if erase:
- fill_count = self._tw.fullwidth - len(line) - 1
- fill = " " * fill_count
- else:
- fill = ""
- line = str(line)
- self._tw.write("\r" + line + fill, **markup)
-
- def write_sep(
- self,
- sep: str,
- title: Optional[str] = None,
- fullwidth: Optional[int] = None,
- **markup: bool,
- ) -> None:
- self.ensure_newline()
- self._tw.sep(sep, title, fullwidth, **markup)
-
- def section(self, title: str, sep: str = "=", **kw: bool) -> None:
- self._tw.sep(sep, title, **kw)
-
- def line(self, msg: str, **kw: bool) -> None:
- self._tw.line(msg, **kw)
-
- def _add_stats(self, category: str, items: Sequence[Any]) -> None:
- set_main_color = category not in self.stats
- self.stats.setdefault(category, []).extend(items)
- if set_main_color:
- self._set_main_color()
-
- def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
- for line in str(excrepr).split("\n"):
- self.write_line("INTERNALERROR> " + line)
- return True
-
- def pytest_warning_recorded(
- self,
- warning_message: warnings.WarningMessage,
- nodeid: str,
- ) -> None:
- from _pytest.warnings import warning_record_to_str
-
- fslocation = warning_message.filename, warning_message.lineno
- message = warning_record_to_str(warning_message)
-
- warning_report = WarningReport(
- fslocation=fslocation, message=message, nodeid=nodeid
- )
- self._add_stats("warnings", [warning_report])
-
- def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
- if self.config.option.traceconfig:
- msg = f"PLUGIN registered: {plugin}"
-            # XXX This event may happen during setup/teardown time,
-            # which unfortunately captures our output here and
-            # garbles it if we use self.write_line.
- self.write_line(msg)
-
- def pytest_deselected(self, items: Sequence[Item]) -> None:
- self._add_stats("deselected", items)
-
- def pytest_runtest_logstart(
- self, nodeid: str, location: Tuple[str, Optional[int], str]
- ) -> None:
- # Ensure that the path is printed before the
- # 1st test of a module starts running.
- if self.showlongtestinfo:
- line = self._locationline(nodeid, *location)
- self.write_ensure_prefix(line, "")
- self.flush()
- elif self.showfspath:
- self.write_fspath_result(nodeid, "")
- self.flush()
-
- def pytest_runtest_logreport(self, report: TestReport) -> None:
- self._tests_ran = True
- rep = report
- res: Tuple[
- str, str, Union[str, Tuple[str, Mapping[str, bool]]]
- ] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
- category, letter, word = res
- if not isinstance(word, tuple):
- markup = None
- else:
- word, markup = word
- self._add_stats(category, [rep])
- if not letter and not word:
- # Probably passed setup/teardown.
- return
- running_xdist = hasattr(rep, "node")
- if markup is None:
- was_xfail = hasattr(report, "wasxfail")
- if rep.passed and not was_xfail:
- markup = {"green": True}
- elif rep.passed and was_xfail:
- markup = {"yellow": True}
- elif rep.failed:
- markup = {"red": True}
- elif rep.skipped:
- markup = {"yellow": True}
- else:
- markup = {}
- if self.verbosity <= 0:
- self._tw.write(letter, **markup)
- else:
- self._progress_nodeids_reported.add(rep.nodeid)
- line = self._locationline(rep.nodeid, *rep.location)
- if not running_xdist:
- self.write_ensure_prefix(line, word, **markup)
- if rep.skipped or hasattr(report, "wasxfail"):
- reason = _get_raw_skip_reason(rep)
- if self.config.option.verbose < 2:
- available_width = (
- (self._tw.fullwidth - self._tw.width_of_current_line)
- - len(" [100%]")
- - 1
- )
- formatted_reason = _format_trimmed(
- " ({})", reason, available_width
- )
- else:
- formatted_reason = f" ({reason})"
-
- if reason and formatted_reason is not None:
- self._tw.write(formatted_reason)
- if self._show_progress_info:
- self._write_progress_information_filling_space()
- else:
- self.ensure_newline()
- self._tw.write("[%s]" % rep.node.gateway.id)
- if self._show_progress_info:
- self._tw.write(
- self._get_progress_information_message() + " ", cyan=True
- )
- else:
- self._tw.write(" ")
- self._tw.write(word, **markup)
- self._tw.write(" " + line)
- self.currentfspath = -2
- self.flush()
-
- @property
- def _is_last_item(self) -> bool:
- assert self._session is not None
- return len(self._progress_nodeids_reported) == self._session.testscollected
-
- def pytest_runtest_logfinish(self, nodeid: str) -> None:
- assert self._session
- if self.verbosity <= 0 and self._show_progress_info:
- if self._show_progress_info == "count":
- num_tests = self._session.testscollected
- progress_length = len(f" [{num_tests}/{num_tests}]")
- else:
- progress_length = len(" [100%]")
-
- self._progress_nodeids_reported.add(nodeid)
-
- if self._is_last_item:
- self._write_progress_information_filling_space()
- else:
- main_color, _ = self._get_main_color()
- w = self._width_of_current_line
- past_edge = w + progress_length + 1 >= self._screen_width
- if past_edge:
- msg = self._get_progress_information_message()
- self._tw.write(msg + "\n", **{main_color: True})
-
- def _get_progress_information_message(self) -> str:
- assert self._session
- collected = self._session.testscollected
- if self._show_progress_info == "count":
- if collected:
- progress = self._progress_nodeids_reported
- counter_format = f"{{:{len(str(collected))}d}}"
- format_string = f" [{counter_format}/{{}}]"
- return format_string.format(len(progress), collected)
- return f" [ {collected} / {collected} ]"
- else:
- if collected:
- return " [{:3d}%]".format(
- len(self._progress_nodeids_reported) * 100 // collected
- )
- return " [100%]"
-
- def _write_progress_information_filling_space(self) -> None:
- color, _ = self._get_main_color()
- msg = self._get_progress_information_message()
- w = self._width_of_current_line
- fill = self._tw.fullwidth - w - 1
- self.write(msg.rjust(fill), flush=True, **{color: True})
-
- @property
- def _width_of_current_line(self) -> int:
- """Return the width of the current line."""
- return self._tw.width_of_current_line
-
- def pytest_collection(self) -> None:
- if self.isatty:
- if self.config.option.verbose >= 0:
- self.write("collecting ... ", flush=True, bold=True)
- self._collect_report_last_write = timing.time()
- elif self.config.option.verbose >= 1:
- self.write("collecting ... ", flush=True, bold=True)
-
- def pytest_collectreport(self, report: CollectReport) -> None:
- if report.failed:
- self._add_stats("error", [report])
- elif report.skipped:
- self._add_stats("skipped", [report])
- items = [x for x in report.result if isinstance(x, Item)]
- self._numcollected += len(items)
- if self.isatty:
- self.report_collect()
-
- def report_collect(self, final: bool = False) -> None:
- if self.config.option.verbose < 0:
- return
-
- if not final:
- # Only write "collecting" report every 0.5s.
- t = timing.time()
- if (
- self._collect_report_last_write is not None
- and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
- ):
- return
- self._collect_report_last_write = t
-
- errors = len(self.stats.get("error", []))
- skipped = len(self.stats.get("skipped", []))
- deselected = len(self.stats.get("deselected", []))
- selected = self._numcollected - deselected
- line = "collected " if final else "collecting "
- line += (
- str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
- )
- if errors:
- line += " / %d error%s" % (errors, "s" if errors != 1 else "")
- if deselected:
- line += " / %d deselected" % deselected
- if skipped:
- line += " / %d skipped" % skipped
- if self._numcollected > selected:
- line += " / %d selected" % selected
- if self.isatty:
- self.rewrite(line, bold=True, erase=True)
- if final:
- self.write("\n")
- else:
- self.write_line(line)
-
- @hookimpl(trylast=True)
- def pytest_sessionstart(self, session: "Session") -> None:
- self._session = session
- self._sessionstarttime = timing.time()
- if not self.showheader:
- return
- self.write_sep("=", "test session starts", bold=True)
- verinfo = platform.python_version()
- if not self.no_header:
- msg = f"platform {sys.platform} -- Python {verinfo}"
- pypy_version_info = getattr(sys, "pypy_version_info", None)
- if pypy_version_info:
- verinfo = ".".join(map(str, pypy_version_info[:3]))
- msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
- msg += ", pytest-{}, pluggy-{}".format(
- _pytest._version.version, pluggy.__version__
- )
- if (
- self.verbosity > 0
- or self.config.option.debug
- or getattr(self.config.option, "pastebin", None)
- ):
- msg += " -- " + str(sys.executable)
- self.write_line(msg)
- lines = self.config.hook.pytest_report_header(
- config=self.config, start_path=self.startpath
- )
- self._write_report_lines_from_hooks(lines)
-
- def _write_report_lines_from_hooks(
- self, lines: Sequence[Union[str, Sequence[str]]]
- ) -> None:
- for line_or_lines in reversed(lines):
- if isinstance(line_or_lines, str):
- self.write_line(line_or_lines)
- else:
- for line in line_or_lines:
- self.write_line(line)
-
- def pytest_report_header(self, config: Config) -> List[str]:
- line = "rootdir: %s" % config.rootpath
-
- if config.inipath:
- line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)
-
- testpaths: List[str] = config.getini("testpaths")
- if config.invocation_params.dir == config.rootpath and config.args == testpaths:
- line += ", testpaths: {}".format(", ".join(testpaths))
-
- result = [line]
-
- plugininfo = config.pluginmanager.list_plugin_distinfo()
- if plugininfo:
- result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
- return result
-
- def pytest_collection_finish(self, session: "Session") -> None:
- self.report_collect(True)
-
- lines = self.config.hook.pytest_report_collectionfinish(
- config=self.config,
- start_path=self.startpath,
- items=session.items,
- )
- self._write_report_lines_from_hooks(lines)
-
- if self.config.getoption("collectonly"):
- if session.items:
- if self.config.option.verbose > -1:
- self._tw.line("")
- self._printcollecteditems(session.items)
-
- failed = self.stats.get("failed")
- if failed:
- self._tw.sep("!", "collection failures")
- for rep in failed:
- rep.toterminal(self._tw)
-
- def _printcollecteditems(self, items: Sequence[Item]) -> None:
- if self.config.option.verbose < 0:
- if self.config.option.verbose < -1:
- counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
- for name, count in sorted(counts.items()):
- self._tw.line("%s: %d" % (name, count))
- else:
- for item in items:
- self._tw.line(item.nodeid)
- return
- stack: List[Node] = []
- indent = ""
- for item in items:
- needed_collectors = item.listchain()[1:] # strip root node
- while stack:
- if stack == needed_collectors[: len(stack)]:
- break
- stack.pop()
- for col in needed_collectors[len(stack) :]:
- stack.append(col)
- indent = (len(stack) - 1) * " "
- self._tw.line(f"{indent}{col}")
- if self.config.option.verbose >= 1:
- obj = getattr(col, "obj", None)
- doc = inspect.getdoc(obj) if obj else None
- if doc:
- for line in doc.splitlines():
- self._tw.line("{}{}".format(indent + " ", line))
-
- @hookimpl(hookwrapper=True)
- def pytest_sessionfinish(
- self, session: "Session", exitstatus: Union[int, ExitCode]
- ):
- outcome = yield
- outcome.get_result()
- self._tw.line("")
- summary_exit_codes = (
- ExitCode.OK,
- ExitCode.TESTS_FAILED,
- ExitCode.INTERRUPTED,
- ExitCode.USAGE_ERROR,
- ExitCode.NO_TESTS_COLLECTED,
- )
- if exitstatus in summary_exit_codes and not self.no_summary:
- self.config.hook.pytest_terminal_summary(
- terminalreporter=self, exitstatus=exitstatus, config=self.config
- )
- if session.shouldfail:
- self.write_sep("!", str(session.shouldfail), red=True)
- if exitstatus == ExitCode.INTERRUPTED:
- self._report_keyboardinterrupt()
- self._keyboardinterrupt_memo = None
- elif session.shouldstop:
- self.write_sep("!", str(session.shouldstop), red=True)
- self.summary_stats()
-
- @hookimpl(hookwrapper=True)
- def pytest_terminal_summary(self) -> Generator[None, None, None]:
- self.summary_errors()
- self.summary_failures()
- self.summary_warnings()
- self.summary_passes()
- yield
- self.short_test_summary()
- # Display any extra warnings from teardown here (if any).
- self.summary_warnings()
-
- def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
- self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
-
- def pytest_unconfigure(self) -> None:
- if self._keyboardinterrupt_memo is not None:
- self._report_keyboardinterrupt()
-
- def _report_keyboardinterrupt(self) -> None:
- excrepr = self._keyboardinterrupt_memo
- assert excrepr is not None
- assert excrepr.reprcrash is not None
- msg = excrepr.reprcrash.message
- self.write_sep("!", msg)
- if "KeyboardInterrupt" in msg:
- if self.config.option.fulltrace:
- excrepr.toterminal(self._tw)
- else:
- excrepr.reprcrash.toterminal(self._tw)
- self._tw.line(
- "(to show a full traceback on KeyboardInterrupt use --full-trace)",
- yellow=True,
- )
-
- def _locationline(
- self, nodeid: str, fspath: str, lineno: Optional[int], domain: str
- ) -> str:
- def mkrel(nodeid: str) -> str:
- line = self.config.cwd_relative_nodeid(nodeid)
- if domain and line.endswith(domain):
- line = line[: -len(domain)]
- values = domain.split("[")
- values[0] = values[0].replace(".", "::") # don't replace '.' in params
- line += "[".join(values)
- return line
-
- # collect_fspath comes from testid which has a "/"-normalized path.
- if fspath:
- res = mkrel(nodeid)
- if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
- "\\", nodes.SEP
- ):
- res += " <- " + bestrelpath(self.startpath, Path(fspath))
- else:
- res = "[location]"
- return res + " "
-
- def _getfailureheadline(self, rep):
- head_line = rep.head_line
- if head_line:
- return head_line
- return "test session" # XXX?
-
- def _getcrashline(self, rep):
- try:
- return str(rep.longrepr.reprcrash)
- except AttributeError:
- try:
- return str(rep.longrepr)[:50]
- except AttributeError:
- return ""
-
- #
- # Summaries for sessionfinish.
- #
- def getreports(self, name: str):
- return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")]
-
- def summary_warnings(self) -> None:
- if self.hasopt("w"):
- all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings")
- if not all_warnings:
- return
-
- final = self._already_displayed_warnings is not None
- if final:
- warning_reports = all_warnings[self._already_displayed_warnings :]
- else:
- warning_reports = all_warnings
- self._already_displayed_warnings = len(warning_reports)
- if not warning_reports:
- return
-
- reports_grouped_by_message: Dict[str, List[WarningReport]] = {}
- for wr in warning_reports:
- reports_grouped_by_message.setdefault(wr.message, []).append(wr)
-
- def collapsed_location_report(reports: List[WarningReport]) -> str:
- locations = []
- for w in reports:
- location = w.get_location(self.config)
- if location:
- locations.append(location)
-
- if len(locations) < 10:
- return "\n".join(map(str, locations))
-
- counts_by_filename = Counter(
- str(loc).split("::", 1)[0] for loc in locations
- )
- return "\n".join(
- "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
- for k, v in counts_by_filename.items()
- )
-
- title = "warnings summary (final)" if final else "warnings summary"
- self.write_sep("=", title, yellow=True, bold=False)
- for message, message_reports in reports_grouped_by_message.items():
- maybe_location = collapsed_location_report(message_reports)
- if maybe_location:
- self._tw.line(maybe_location)
- lines = message.splitlines()
- indented = "\n".join(" " + x for x in lines)
- message = indented.rstrip()
- else:
- message = message.rstrip()
- self._tw.line(message)
- self._tw.line()
- self._tw.line(
- "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
- )
-
- def summary_passes(self) -> None:
- if self.config.option.tbstyle != "no":
- if self.hasopt("P"):
- reports: List[TestReport] = self.getreports("passed")
- if not reports:
- return
- self.write_sep("=", "PASSES")
- for rep in reports:
- if rep.sections:
- msg = self._getfailureheadline(rep)
- self.write_sep("_", msg, green=True, bold=True)
- self._outrep_summary(rep)
- self._handle_teardown_sections(rep.nodeid)
-
- def _get_teardown_reports(self, nodeid: str) -> List[TestReport]:
- reports = self.getreports("")
- return [
- report
- for report in reports
- if report.when == "teardown" and report.nodeid == nodeid
- ]
-
- def _handle_teardown_sections(self, nodeid: str) -> None:
- for report in self._get_teardown_reports(nodeid):
- self.print_teardown_sections(report)
-
- def print_teardown_sections(self, rep: TestReport) -> None:
- showcapture = self.config.option.showcapture
- if showcapture == "no":
- return
- for secname, content in rep.sections:
- if showcapture != "all" and showcapture not in secname:
- continue
- if "teardown" in secname:
- self._tw.sep("-", secname)
- if content[-1:] == "\n":
- content = content[:-1]
- self._tw.line(content)
-
- def summary_failures(self) -> None:
- if self.config.option.tbstyle != "no":
- reports: List[BaseReport] = self.getreports("failed")
- if not reports:
- return
- self.write_sep("=", "FAILURES")
- if self.config.option.tbstyle == "line":
- for rep in reports:
- line = self._getcrashline(rep)
- self.write_line(line)
- else:
- for rep in reports:
- msg = self._getfailureheadline(rep)
- self.write_sep("_", msg, red=True, bold=True)
- self._outrep_summary(rep)
- self._handle_teardown_sections(rep.nodeid)
-
- def summary_errors(self) -> None:
- if self.config.option.tbstyle != "no":
- reports: List[BaseReport] = self.getreports("error")
- if not reports:
- return
- self.write_sep("=", "ERRORS")
- for rep in self.stats["error"]:
- msg = self._getfailureheadline(rep)
- if rep.when == "collect":
- msg = "ERROR collecting " + msg
- else:
- msg = f"ERROR at {rep.when} of {msg}"
- self.write_sep("_", msg, red=True, bold=True)
- self._outrep_summary(rep)
-
- def _outrep_summary(self, rep: BaseReport) -> None:
- rep.toterminal(self._tw)
- showcapture = self.config.option.showcapture
- if showcapture == "no":
- return
- for secname, content in rep.sections:
- if showcapture != "all" and showcapture not in secname:
- continue
- self._tw.sep("-", secname)
- if content[-1:] == "\n":
- content = content[:-1]
- self._tw.line(content)
-
- def summary_stats(self) -> None:
- if self.verbosity < -1:
- return
-
- session_duration = timing.time() - self._sessionstarttime
- (parts, main_color) = self.build_summary_stats_line()
- line_parts = []
-
- display_sep = self.verbosity >= 0
- if display_sep:
- fullwidth = self._tw.fullwidth
- for text, markup in parts:
- with_markup = self._tw.markup(text, **markup)
- if display_sep:
- fullwidth += len(with_markup) - len(text)
- line_parts.append(with_markup)
- msg = ", ".join(line_parts)
-
- main_markup = {main_color: True}
- duration = f" in {format_session_duration(session_duration)}"
- duration_with_markup = self._tw.markup(duration, **main_markup)
- if display_sep:
- fullwidth += len(duration_with_markup) - len(duration)
- msg += duration_with_markup
-
- if display_sep:
- markup_for_end_sep = self._tw.markup("", **main_markup)
- if markup_for_end_sep.endswith("\x1b[0m"):
- markup_for_end_sep = markup_for_end_sep[:-4]
- fullwidth += len(markup_for_end_sep)
- msg += markup_for_end_sep
-
- if display_sep:
- self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
- else:
- self.write_line(msg, **main_markup)
-
- def short_test_summary(self) -> None:
- if not self.reportchars:
- return
-
- def show_simple(stat, lines: List[str]) -> None:
- failed = self.stats.get(stat, [])
- if not failed:
- return
- termwidth = self._tw.fullwidth
- config = self.config
- for rep in failed:
- line = _get_line_with_reprcrash_message(config, rep, termwidth)
- lines.append(line)
-
- def show_xfailed(lines: List[str]) -> None:
- xfailed = self.stats.get("xfailed", [])
- for rep in xfailed:
- verbose_word = rep._get_verbose_word(self.config)
- pos = _get_pos(self.config, rep)
- lines.append(f"{verbose_word} {pos}")
- reason = rep.wasxfail
- if reason:
- lines.append(" " + str(reason))
-
- def show_xpassed(lines: List[str]) -> None:
- xpassed = self.stats.get("xpassed", [])
- for rep in xpassed:
- verbose_word = rep._get_verbose_word(self.config)
- pos = _get_pos(self.config, rep)
- reason = rep.wasxfail
- lines.append(f"{verbose_word} {pos} {reason}")
-
- def show_skipped(lines: List[str]) -> None:
- skipped: List[CollectReport] = self.stats.get("skipped", [])
- fskips = _folded_skips(self.startpath, skipped) if skipped else []
- if not fskips:
- return
- verbose_word = skipped[0]._get_verbose_word(self.config)
- for num, fspath, lineno, reason in fskips:
- if reason.startswith("Skipped: "):
- reason = reason[9:]
- if lineno is not None:
- lines.append(
- "%s [%d] %s:%d: %s"
- % (verbose_word, num, fspath, lineno, reason)
- )
- else:
- lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
-
- REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
- "x": show_xfailed,
- "X": show_xpassed,
- "f": partial(show_simple, "failed"),
- "s": show_skipped,
- "p": partial(show_simple, "passed"),
- "E": partial(show_simple, "error"),
- }
-
- lines: List[str] = []
- for char in self.reportchars:
- action = REPORTCHAR_ACTIONS.get(char)
- if action: # skipping e.g. "P" (passed with output) here.
- action(lines)
-
- if lines:
- self.write_sep("=", "short test summary info")
- for line in lines:
- self.write_line(line)
-
- def _get_main_color(self) -> Tuple[str, List[str]]:
- if self._main_color is None or self._known_types is None or self._is_last_item:
- self._set_main_color()
- assert self._main_color
- assert self._known_types
- return self._main_color, self._known_types
-
- def _determine_main_color(self, unknown_type_seen: bool) -> str:
- stats = self.stats
- if "failed" in stats or "error" in stats:
- main_color = "red"
- elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
- main_color = "yellow"
- elif "passed" in stats or not self._is_last_item:
- main_color = "green"
- else:
- main_color = "yellow"
- return main_color
-
- def _set_main_color(self) -> None:
- unknown_types: List[str] = []
- for found_type in self.stats.keys():
- if found_type: # setup/teardown reports have an empty key, ignore them
- if found_type not in KNOWN_TYPES and found_type not in unknown_types:
- unknown_types.append(found_type)
- self._known_types = list(KNOWN_TYPES) + unknown_types
- self._main_color = self._determine_main_color(bool(unknown_types))
-
- def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
- """
- Build the parts used in the last summary stats line.
-
-        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
-
- This function builds a list of the "parts" that make up for the text in that line, in
- the example above it would be:
-
- [
- ("12 passed", {"green": True}),
-            ("2 errors", {"red": True})
- ]
-
- That last dict for each line is a "markup dictionary", used by TerminalWriter to
- color output.
-
- The final color of the line is also determined by this function, and is the second
- element of the returned tuple.
- """
- if self.config.getoption("collectonly"):
- return self._build_collect_only_summary_stats_line()
- else:
- return self._build_normal_summary_stats_line()
-
- def _get_reports_to_display(self, key: str) -> List[Any]:
- """Get test/collection reports for the given status key, such as `passed` or `error`."""
- reports = self.stats.get(key, [])
- return [x for x in reports if getattr(x, "count_towards_summary", True)]
-
- def _build_normal_summary_stats_line(
- self,
- ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
- main_color, known_types = self._get_main_color()
- parts = []
-
- for key in known_types:
- reports = self._get_reports_to_display(key)
- if reports:
- count = len(reports)
- color = _color_for_type.get(key, _color_for_type_default)
- markup = {color: True, "bold": color == main_color}
- parts.append(("%d %s" % pluralize(count, key), markup))
-
- if not parts:
- parts = [("no tests ran", {_color_for_type_default: True})]
-
- return parts, main_color
-
- def _build_collect_only_summary_stats_line(
- self,
- ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
- deselected = len(self._get_reports_to_display("deselected"))
- errors = len(self._get_reports_to_display("error"))
-
- if self._numcollected == 0:
- parts = [("no tests collected", {"yellow": True})]
- main_color = "yellow"
-
- elif deselected == 0:
- main_color = "green"
- collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
- parts = [(collected_output, {main_color: True})]
- else:
- all_tests_were_deselected = self._numcollected == deselected
- if all_tests_were_deselected:
- main_color = "yellow"
- collected_output = f"no tests collected ({deselected} deselected)"
- else:
- main_color = "green"
- selected = self._numcollected - deselected
- collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
-
- parts = [(collected_output, {main_color: True})]
-
- if errors:
- main_color = _color_for_type["error"]
- parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
-
- return parts, main_color
-
-
-def _get_pos(config: Config, rep: BaseReport):
- nodeid = config.cwd_relative_nodeid(rep.nodeid)
- return nodeid
-
-
-def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
-    """Format msg into format, ellipsizing it if it doesn't fit in available_width.
-
- Returns None if even the ellipsis can't fit.
- """
- # Only use the first line.
- i = msg.find("\n")
- if i != -1:
- msg = msg[:i]
-
- ellipsis = "..."
- format_width = wcswidth(format.format(""))
- if format_width + len(ellipsis) > available_width:
- return None
-
- if format_width + wcswidth(msg) > available_width:
- available_width -= len(ellipsis)
- msg = msg[:available_width]
- while format_width + wcswidth(msg) > available_width:
- msg = msg[:-1]
- msg += ellipsis
-
- return format.format(msg)
-
-
-def _get_line_with_reprcrash_message(
- config: Config, rep: BaseReport, termwidth: int
-) -> str:
- """Get summary line for a report, trying to add reprcrash message."""
- verbose_word = rep._get_verbose_word(config)
- pos = _get_pos(config, rep)
-
- line = f"{verbose_word} {pos}"
- line_width = wcswidth(line)
-
- try:
- # Type ignored intentionally -- possible AttributeError expected.
- msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
- except AttributeError:
- pass
- else:
- available_width = termwidth - line_width
- msg = _format_trimmed(" - {}", msg, available_width)
- if msg is not None:
- line += msg
-
- return line
-
-
-def _folded_skips(
- startpath: Path,
- skipped: Sequence[CollectReport],
-) -> List[Tuple[int, str, Optional[int], str]]:
- d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
- for event in skipped:
- assert event.longrepr is not None
- assert isinstance(event.longrepr, tuple), (event, event.longrepr)
- assert len(event.longrepr) == 3, (event, event.longrepr)
- fspath, lineno, reason = event.longrepr
- # For consistency, report all fspaths in relative form.
- fspath = bestrelpath(startpath, Path(fspath))
- keywords = getattr(event, "keywords", {})
- # Folding reports with global pytestmark variable.
- # This is a workaround, because for now we cannot identify the scope of a skip marker
-        # TODO: Revisit once the scope of marks is fixed.
- if (
- event.when == "setup"
- and "skip" in keywords
- and "pytestmark" not in keywords
- ):
- key: Tuple[str, Optional[int], str] = (fspath, None, reason)
- else:
- key = (fspath, lineno, reason)
- d.setdefault(key, []).append(event)
- values: List[Tuple[int, str, Optional[int], str]] = []
- for key, events in d.items():
- values.append((len(events), *key))
- return values
-
-
-_color_for_type = {
- "failed": "red",
- "error": "red",
- "warnings": "yellow",
- "passed": "green",
-}
-_color_for_type_default = "yellow"
-
-
-def pluralize(count: int, noun: str) -> Tuple[int, str]:
- # No need to pluralize words such as `failed` or `passed`.
- if noun not in ["error", "warnings", "test"]:
- return count, noun
-
- # The `warnings` key is plural. To avoid API breakage, we keep it that way but
- # set it to singular here so we can determine plurality in the same way as we do
- # for `error`.
- noun = noun.replace("warnings", "warning")
-
- return count, noun + "s" if count != 1 else noun
-
-
-def _plugin_nameversions(plugininfo) -> List[str]:
- values: List[str] = []
- for plugin, dist in plugininfo:
- # Gets us name and version!
- name = "{dist.project_name}-{dist.version}".format(dist=dist)
- # Questionable convenience, but it keeps things short.
- if name.startswith("pytest-"):
- name = name[7:]
-        # We decided to print Python package names; they can have more than one plugin.
- if name not in values:
- values.append(name)
- return values
-
-
-def format_session_duration(seconds: float) -> str:
- """Format the given seconds in a human readable manner to show in the final summary."""
- if seconds < 60:
- return f"{seconds:.2f}s"
- else:
- dt = datetime.timedelta(seconds=int(seconds))
- return f"{seconds:.2f}s ({dt})"
-
-
-def _get_raw_skip_reason(report: TestReport) -> str:
- """Get the reason string of a skip/xfail/xpass test report.
-
- The string is just the part given by the user.
- """
- if hasattr(report, "wasxfail"):
- reason = cast(str, report.wasxfail)
- if reason.startswith("reason: "):
- reason = reason[len("reason: ") :]
- return reason
- else:
- assert report.skipped
- assert isinstance(report.longrepr, tuple)
- _, _, reason = report.longrepr
- if reason.startswith("Skipped: "):
- reason = reason[len("Skipped: ") :]
- elif reason == "Skipped":
- reason = ""
- return reason
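The _write_report_lines_from_hooks method above prints whatever plugins return from pytest_report_header under the session banner; a minimal conftest sketch (the returned text is illustrative):

    # conftest.py
    def pytest_report_header(config):
        # Each returned string becomes one extra line under "test session starts".
        return [f"configfile: {config.inipath}", "project: example (illustrative)"]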
diff --git a/contrib/python/pytest/py3/_pytest/threadexception.py b/contrib/python/pytest/py3/_pytest/threadexception.py
deleted file mode 100644
index 43341e739a..0000000000
--- a/contrib/python/pytest/py3/_pytest/threadexception.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import threading
-import traceback
-import warnings
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import Generator
-from typing import Optional
-from typing import Type
-
-import pytest
-
-
-# Copied from cpython/Lib/test/support/threading_helper.py, with modifications.
-class catch_threading_exception:
- """Context manager catching threading.Thread exception using
- threading.excepthook.
-
- Storing exc_value using a custom hook can create a reference cycle. The
- reference cycle is broken explicitly when the context manager exits.
-
- Storing thread using a custom hook can resurrect it if it is set to an
- object which is being finalized. Exiting the context manager clears the
- stored object.
-
- Usage:
- with threading_helper.catch_threading_exception() as cm:
- # code spawning a thread which raises an exception
- ...
- # check the thread exception: use cm.args
- ...
- # cm.args attribute no longer exists at this point
- # (to break a reference cycle)
- """
-
- def __init__(self) -> None:
- self.args: Optional["threading.ExceptHookArgs"] = None
- self._old_hook: Optional[Callable[["threading.ExceptHookArgs"], Any]] = None
-
- def _hook(self, args: "threading.ExceptHookArgs") -> None:
- self.args = args
-
- def __enter__(self) -> "catch_threading_exception":
- self._old_hook = threading.excepthook
- threading.excepthook = self._hook
- return self
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- assert self._old_hook is not None
- threading.excepthook = self._old_hook
- self._old_hook = None
- del self.args
-
-
-def thread_exception_runtest_hook() -> Generator[None, None, None]:
- with catch_threading_exception() as cm:
- yield
- if cm.args:
- thread_name = "<unknown>" if cm.args.thread is None else cm.args.thread.name
- msg = f"Exception in thread {thread_name}\n\n"
- msg += "".join(
- traceback.format_exception(
- cm.args.exc_type,
- cm.args.exc_value,
- cm.args.exc_traceback,
- )
- )
- warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))
-
-
-@pytest.hookimpl(hookwrapper=True, trylast=True)
-def pytest_runtest_setup() -> Generator[None, None, None]:
- yield from thread_exception_runtest_hook()
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_call() -> Generator[None, None, None]:
- yield from thread_exception_runtest_hook()
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_teardown() -> Generator[None, None, None]:
- yield from thread_exception_runtest_hook()
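
A hedged usage sketch of the context manager deleted above (Python 3.8+, where threading.excepthook exists); importing it from the private _pytest.threadexception module, and the worker function, are assumptions for illustration only.

    import threading

    from _pytest.threadexception import catch_threading_exception

    def worker() -> None:
        raise ValueError("boom")

    with catch_threading_exception() as cm:
        t = threading.Thread(target=worker, name="worker-1")
        t.start()
        t.join()
        # The custom hook stored the exception raised inside the thread.
        assert cm.args is not None
        assert cm.args.exc_type is ValueError
        assert cm.args.thread is t
    # After the block, cm.args has been deleted to break the reference cycle.
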
diff --git a/contrib/python/pytest/py3/_pytest/timing.py b/contrib/python/pytest/py3/_pytest/timing.py
deleted file mode 100644
index 925163a585..0000000000
--- a/contrib/python/pytest/py3/_pytest/timing.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Indirection for time functions.
-
-We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect
-pytest runtime information (issue #185).
-
-Fixture "mock_timing" also interacts with this module for pytest's own tests.
-"""
-from time import perf_counter
-from time import sleep
-from time import time
-
-__all__ = ["perf_counter", "sleep", "time"]
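
A small sketch of the effect this indirection has: because the names are bound at import time, mocking time.time in a test does not change what _pytest.timing.time reports. Importing the private module directly here is for illustration only.

    import time
    from unittest import mock

    from _pytest import timing

    with mock.patch("time.time", return_value=0.0):
        assert time.time() == 0.0   # the patched module-level attribute
        assert timing.time() > 0.0  # the function object bound at import time
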
diff --git a/contrib/python/pytest/py3/_pytest/tmpdir.py b/contrib/python/pytest/py3/_pytest/tmpdir.py
deleted file mode 100644
index 12dc463a2a..0000000000
--- a/contrib/python/pytest/py3/_pytest/tmpdir.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""Support for providing temporary directories to test functions."""
-import os
-import re
-import sys
-import tempfile
-from pathlib import Path
-from typing import Optional
-
-import attr
-
-from .pathlib import LOCK_TIMEOUT
-from .pathlib import make_numbered_dir
-from .pathlib import make_numbered_dir_with_cleanup
-from .pathlib import rm_rf
-from _pytest.compat import final
-from _pytest.config import Config
-from _pytest.deprecated import check_ispytest
-from _pytest.fixtures import fixture
-from _pytest.fixtures import FixtureRequest
-from _pytest.monkeypatch import MonkeyPatch
-
-
-@final
-@attr.s(init=False)
-class TempPathFactory:
- """Factory for temporary directories under the common base temp directory.
-
- The base directory can be configured using the ``--basetemp`` option.
- """
-
- _given_basetemp = attr.ib(type=Optional[Path])
- _trace = attr.ib()
- _basetemp = attr.ib(type=Optional[Path])
-
- def __init__(
- self,
- given_basetemp: Optional[Path],
- trace,
- basetemp: Optional[Path] = None,
- *,
- _ispytest: bool = False,
- ) -> None:
- check_ispytest(_ispytest)
- if given_basetemp is None:
- self._given_basetemp = None
- else:
- # Use os.path.abspath() to get absolute path instead of resolve() as it
- # does not work the same in all platforms (see #4427).
- # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
- self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
- self._trace = trace
- self._basetemp = basetemp
-
- @classmethod
- def from_config(
- cls,
- config: Config,
- *,
- _ispytest: bool = False,
- ) -> "TempPathFactory":
- """Create a factory according to pytest configuration.
-
- :meta private:
- """
- check_ispytest(_ispytest)
- return cls(
- given_basetemp=config.option.basetemp,
- trace=config.trace.get("tmpdir"),
- _ispytest=True,
- )
-
- def _ensure_relative_to_basetemp(self, basename: str) -> str:
- basename = os.path.normpath(basename)
- if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
- raise ValueError(f"{basename} is not a normalized and relative path")
- return basename
-
- def mktemp(self, basename: str, numbered: bool = True) -> Path:
- """Create a new temporary directory managed by the factory.
-
- :param basename:
- Directory base name, must be a relative path.
-
- :param numbered:
- If ``True``, ensure the directory is unique by adding a numbered
- suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
- means that this function will create directories named ``"foo-0"``,
- ``"foo-1"``, ``"foo-2"`` and so on.
-
- :returns:
- The path to the new directory.
- """
- basename = self._ensure_relative_to_basetemp(basename)
- if not numbered:
- p = self.getbasetemp().joinpath(basename)
- p.mkdir(mode=0o700)
- else:
- p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
- self._trace("mktemp", p)
- return p
-
- def getbasetemp(self) -> Path:
- """Return the base temporary directory, creating it if needed."""
- if self._basetemp is not None:
- return self._basetemp
-
- if self._given_basetemp is not None:
- basetemp = self._given_basetemp
- if basetemp.exists():
- rm_rf(basetemp)
- basetemp.mkdir(mode=0o700)
- basetemp = basetemp.resolve()
- else:
- from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
- temproot = Path(from_env or tempfile.gettempdir()).resolve()
- user = get_user() or "unknown"
- # Use a sub-directory in the temproot to speed up the
- # make_numbered_dir() call.
- rootdir = temproot.joinpath(f"pytest-of-{user}")
- try:
- rootdir.mkdir(mode=0o700, exist_ok=True)
- except OSError:
- # getuser() likely returned characters that are illegal for the platform; fall back to the "unknown" user.
- rootdir = temproot.joinpath("pytest-of-unknown")
- rootdir.mkdir(mode=0o700, exist_ok=True)
- # Because we use exist_ok=True with a predictable name, make sure
- # we are the owners, to prevent any funny business (on unix, where
- # temproot is usually shared).
- # Also, to keep things private, fixup any world-readable temp
- # rootdir's permissions. Historically 0o755 was used, so we can't
- # just error out on this, at least for a while.
- if sys.platform != "win32":
- uid = os.getuid()
- rootdir_stat = rootdir.stat()
- # getuid shouldn't fail, but cpython defines such a case.
- # Let's hope for the best.
- if uid != -1:
- if rootdir_stat.st_uid != uid:
- raise OSError(
- f"The temporary directory {rootdir} is not owned by the current user. "
- "Fix this and try again."
- )
- if (rootdir_stat.st_mode & 0o077) != 0:
- os.chmod(rootdir, rootdir_stat.st_mode & ~0o077)
- basetemp = make_numbered_dir_with_cleanup(
- prefix="pytest-",
- root=rootdir,
- keep=3,
- lock_timeout=LOCK_TIMEOUT,
- mode=0o700,
- )
- assert basetemp is not None, basetemp
- self._basetemp = basetemp
- self._trace("new basetemp", basetemp)
- return basetemp
-
-
-def get_user() -> Optional[str]:
- """Return the current user name, or None if getuser() does not work
- in the current environment (see #1010)."""
- try:
- # In some exotic environments, getpass may not be importable.
- import getpass
-
- return getpass.getuser()
- except (ImportError, KeyError):
- return None
-
-
-def pytest_configure(config: Config) -> None:
- """Create a TempPathFactory and attach it to the config object.
-
- This is to comply with existing plugins which expect the handler to be
- available at pytest_configure time, but ideally should be moved entirely
- to the tmp_path_factory session fixture.
- """
- mp = MonkeyPatch()
- config.add_cleanup(mp.undo)
- _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True)
- mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False)
-
-
-@fixture(scope="session")
-def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
- """Return a :class:`pytest.TempPathFactory` instance for the test session."""
- # Set dynamically by pytest_configure() above.
- return request.config._tmp_path_factory # type: ignore
-
-
-def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
- name = request.node.name
- name = re.sub(r"[\W]", "_", name)
- MAXVAL = 30
- name = name[:MAXVAL]
- return factory.mktemp(name, numbered=True)
-
-
-@fixture
-def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path:
- """Return a temporary directory path object which is unique to each test
- function invocation, created as a sub directory of the base temporary
- directory.
-
- By default, a new base temporary directory is created each test session,
- and old bases are removed after 3 sessions, to aid in debugging. If
- ``--basetemp`` is used then it is cleared each session. See :ref:`base
- temporary directory`.
-
- The returned object is a :class:`pathlib.Path` object.
- """
-
- return _mk_tmp(request, tmp_path_factory)
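
For context, a minimal sketch of how the fixtures defined above are used from a test module; the test names are illustrative only.

    def test_write(tmp_path):
        # tmp_path is a pathlib.Path unique to this test invocation.
        target = tmp_path / "hello.txt"
        target.write_text("content")
        assert target.read_text() == "content"

    def test_factory(tmp_path_factory):
        # mktemp() requires a normalized relative base name and, by default,
        # returns a numbered directory such as ".../data0".
        created = tmp_path_factory.mktemp("data")
        assert created.is_dir()
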
diff --git a/contrib/python/pytest/py3/_pytest/unittest.py b/contrib/python/pytest/py3/_pytest/unittest.py
deleted file mode 100644
index 851e4943b2..0000000000
--- a/contrib/python/pytest/py3/_pytest/unittest.py
+++ /dev/null
@@ -1,414 +0,0 @@
-"""Discover and run std-library "unittest" style tests."""
-import sys
-import traceback
-import types
-from typing import Any
-from typing import Callable
-from typing import Generator
-from typing import Iterable
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Type
-from typing import TYPE_CHECKING
-from typing import Union
-
-import _pytest._code
-import pytest
-from _pytest.compat import getimfunc
-from _pytest.compat import is_async_function
-from _pytest.config import hookimpl
-from _pytest.fixtures import FixtureRequest
-from _pytest.nodes import Collector
-from _pytest.nodes import Item
-from _pytest.outcomes import exit
-from _pytest.outcomes import fail
-from _pytest.outcomes import skip
-from _pytest.outcomes import xfail
-from _pytest.python import Class
-from _pytest.python import Function
-from _pytest.python import Module
-from _pytest.runner import CallInfo
-from _pytest.scope import Scope
-
-if TYPE_CHECKING:
- import unittest
- import twisted.trial.unittest
-
- _SysExcInfoType = Union[
- Tuple[Type[BaseException], BaseException, types.TracebackType],
- Tuple[None, None, None],
- ]
-
-
-def pytest_pycollect_makeitem(
- collector: Union[Module, Class], name: str, obj: object
-) -> Optional["UnitTestCase"]:
- # Has unittest been imported and is obj a subclass of its TestCase?
- try:
- ut = sys.modules["unittest"]
- # Type ignored because `ut` is an opaque module.
- if not issubclass(obj, ut.TestCase): # type: ignore
- return None
- except Exception:
- return None
- # Yes, so let's collect it.
- item: UnitTestCase = UnitTestCase.from_parent(collector, name=name, obj=obj)
- return item
-
-
-class UnitTestCase(Class):
- # Marker for fixturemanager.getfixtureinfo()
- # to declare that our children do not support funcargs.
- nofuncargs = True
-
- def collect(self) -> Iterable[Union[Item, Collector]]:
- from unittest import TestLoader
-
- cls = self.obj
- if not getattr(cls, "__test__", True):
- return
-
- skipped = _is_skipped(cls)
- if not skipped:
- self._inject_setup_teardown_fixtures(cls)
- self._inject_setup_class_fixture()
-
- self.session._fixturemanager.parsefactories(self, unittest=True)
- loader = TestLoader()
- foundsomething = False
- for name in loader.getTestCaseNames(self.obj):
- x = getattr(self.obj, name)
- if not getattr(x, "__test__", True):
- continue
- funcobj = getimfunc(x)
- yield TestCaseFunction.from_parent(self, name=name, callobj=funcobj)
- foundsomething = True
-
- if not foundsomething:
- runtest = getattr(self.obj, "runTest", None)
- if runtest is not None:
- ut = sys.modules.get("twisted.trial.unittest", None)
- # Type ignored because `ut` is an opaque module.
- if ut is None or runtest != ut.TestCase.runTest: # type: ignore
- yield TestCaseFunction.from_parent(self, name="runTest")
-
- def _inject_setup_teardown_fixtures(self, cls: type) -> None:
- """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding
- teardown functions (#517)."""
- class_fixture = _make_xunit_fixture(
- cls,
- "setUpClass",
- "tearDownClass",
- "doClassCleanups",
- scope=Scope.Class,
- pass_self=False,
- )
- if class_fixture:
- cls.__pytest_class_setup = class_fixture # type: ignore[attr-defined]
-
- method_fixture = _make_xunit_fixture(
- cls,
- "setup_method",
- "teardown_method",
- None,
- scope=Scope.Function,
- pass_self=True,
- )
- if method_fixture:
- cls.__pytest_method_setup = method_fixture # type: ignore[attr-defined]
-
-
-def _make_xunit_fixture(
- obj: type,
- setup_name: str,
- teardown_name: str,
- cleanup_name: Optional[str],
- scope: Scope,
- pass_self: bool,
-):
- setup = getattr(obj, setup_name, None)
- teardown = getattr(obj, teardown_name, None)
- if setup is None and teardown is None:
- return None
-
- if cleanup_name:
- cleanup = getattr(obj, cleanup_name, lambda *args: None)
- else:
-
- def cleanup(*args):
- pass
-
- @pytest.fixture(
- scope=scope.value,
- autouse=True,
- # Use a unique name to speed up lookup.
- name=f"_unittest_{setup_name}_fixture_{obj.__qualname__}",
- )
- def fixture(self, request: FixtureRequest) -> Generator[None, None, None]:
- if _is_skipped(self):
- reason = self.__unittest_skip_why__
- raise pytest.skip.Exception(reason, _use_item_location=True)
- if setup is not None:
- try:
- if pass_self:
- setup(self, request.function)
- else:
- setup()
- # unittest does not call the cleanup function for every BaseException, so we
- # follow that behaviour here.
- except Exception:
- if pass_self:
- cleanup(self)
- else:
- cleanup()
-
- raise
- yield
- try:
- if teardown is not None:
- if pass_self:
- teardown(self, request.function)
- else:
- teardown()
- finally:
- if pass_self:
- cleanup(self)
- else:
- cleanup()
-
- return fixture
-
-
-class TestCaseFunction(Function):
- nofuncargs = True
- _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None
- _testcase: Optional["unittest.TestCase"] = None
-
- def _getobj(self):
- assert self.parent is not None
- # Unlike a regular Function in a Class, where `item.obj` returns
- # a *bound* method (attached to an instance), TestCaseFunction's
- # `obj` returns an *unbound* method (not attached to an instance).
- # This inconsistency is probably not desirable, but needs some
- # consideration before changing.
- return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined]
-
- def setup(self) -> None:
- # A bound method to be called during teardown() if set (see 'runtest()').
- self._explicit_tearDown: Optional[Callable[[], None]] = None
- assert self.parent is not None
- self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined]
- self._obj = getattr(self._testcase, self.name)
- if hasattr(self, "_request"):
- self._request._fillfixtures()
-
- def teardown(self) -> None:
- if self._explicit_tearDown is not None:
- self._explicit_tearDown()
- self._explicit_tearDown = None
- self._testcase = None
- self._obj = None
-
- def startTest(self, testcase: "unittest.TestCase") -> None:
- pass
-
- def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None:
- # Unwrap potential exception info (see twisted trial support below).
- rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
- try:
- excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo) # type: ignore[arg-type]
- # Invoke the attributes to trigger storing the traceback;
- # trial causes some issues there.
- excinfo.value
- excinfo.traceback
- except TypeError:
- try:
- try:
- values = traceback.format_exception(*rawexcinfo)
- values.insert(
- 0,
- "NOTE: Incompatible Exception Representation, "
- "displaying natively:\n\n",
- )
- fail("".join(values), pytrace=False)
- except (fail.Exception, KeyboardInterrupt):
- raise
- except BaseException:
- fail(
- "ERROR: Unknown Incompatible Exception "
- "representation:\n%r" % (rawexcinfo,),
- pytrace=False,
- )
- except KeyboardInterrupt:
- raise
- except fail.Exception:
- excinfo = _pytest._code.ExceptionInfo.from_current()
- self.__dict__.setdefault("_excinfo", []).append(excinfo)
-
- def addError(
- self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType"
- ) -> None:
- try:
- if isinstance(rawexcinfo[1], exit.Exception):
- exit(rawexcinfo[1].msg)
- except TypeError:
- pass
- self._addexcinfo(rawexcinfo)
-
- def addFailure(
- self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType"
- ) -> None:
- self._addexcinfo(rawexcinfo)
-
- def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None:
- try:
- raise pytest.skip.Exception(reason, _use_item_location=True)
- except skip.Exception:
- self._addexcinfo(sys.exc_info())
-
- def addExpectedFailure(
- self,
- testcase: "unittest.TestCase",
- rawexcinfo: "_SysExcInfoType",
- reason: str = "",
- ) -> None:
- try:
- xfail(str(reason))
- except xfail.Exception:
- self._addexcinfo(sys.exc_info())
-
- def addUnexpectedSuccess(
- self,
- testcase: "unittest.TestCase",
- reason: Optional["twisted.trial.unittest.Todo"] = None,
- ) -> None:
- msg = "Unexpected success"
- if reason:
- msg += f": {reason.reason}"
- # Preserve unittest behaviour - fail the test. Explicitly not an XPASS.
- try:
- fail(msg, pytrace=False)
- except fail.Exception:
- self._addexcinfo(sys.exc_info())
-
- def addSuccess(self, testcase: "unittest.TestCase") -> None:
- pass
-
- def stopTest(self, testcase: "unittest.TestCase") -> None:
- pass
-
- def runtest(self) -> None:
- from _pytest.debugging import maybe_wrap_pytest_function_for_tracing
-
- assert self._testcase is not None
-
- maybe_wrap_pytest_function_for_tracing(self)
-
- # Let the unittest framework handle async functions.
- if is_async_function(self.obj):
- # Type ignored because self acts as the TestResult, but is not actually one.
- self._testcase(result=self) # type: ignore[arg-type]
- else:
- # When --pdb is given, we want to postpone calling tearDown(); otherwise,
- # by the time the pdb prompt appears, tearDown() would probably have cleaned
- # up instance variables, which makes debugging difficult.
- # Arguably we could always postpone tearDown(), but this changes the moment
- # when the TestCase instance interacts with the results object, so it is
- # better to do it only when absolutely needed.
- if self.config.getoption("usepdb") and not _is_skipped(self.obj):
- self._explicit_tearDown = self._testcase.tearDown
- setattr(self._testcase, "tearDown", lambda *args: None)
-
- # We need to update the actual bound method with self.obj, because
- # wrap_pytest_function_for_tracing replaces self.obj by a wrapper.
- setattr(self._testcase, self.name, self.obj)
- try:
- self._testcase(result=self) # type: ignore[arg-type]
- finally:
- delattr(self._testcase, self.name)
-
- def _prunetraceback(
- self, excinfo: _pytest._code.ExceptionInfo[BaseException]
- ) -> None:
- super()._prunetraceback(excinfo)
- traceback = excinfo.traceback.filter(
- lambda x: not x.frame.f_globals.get("__unittest")
- )
- if traceback:
- excinfo.traceback = traceback
-
-
-@hookimpl(tryfirst=True)
-def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
- if isinstance(item, TestCaseFunction):
- if item._excinfo:
- call.excinfo = item._excinfo.pop(0)
- try:
- del call.result
- except AttributeError:
- pass
-
- # Convert unittest.SkipTest to pytest.skip.
- # This is actually only needed for nose, which reuses unittest.SkipTest for
- # its own nose.SkipTest. For unittest TestCases, SkipTest is already
- # handled internally, and doesn't reach here.
- unittest = sys.modules.get("unittest")
- if (
- unittest
- and call.excinfo
- and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined]
- ):
- excinfo = call.excinfo
- call2 = CallInfo[None].from_call(
- lambda: pytest.skip(str(excinfo.value)), call.when
- )
- call.excinfo = call2.excinfo
-
-
-# Twisted trial support.
-
-
-@hookimpl(hookwrapper=True)
-def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
- if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
- ut: Any = sys.modules["twisted.python.failure"]
- Failure__init__ = ut.Failure.__init__
- check_testcase_implements_trial_reporter()
-
- def excstore(
- self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
- ):
- if exc_value is None:
- self._rawexcinfo = sys.exc_info()
- else:
- if exc_type is None:
- exc_type = type(exc_value)
- self._rawexcinfo = (exc_type, exc_value, exc_tb)
- try:
- Failure__init__(
- self, exc_value, exc_type, exc_tb, captureVars=captureVars
- )
- except TypeError:
- Failure__init__(self, exc_value, exc_type, exc_tb)
-
- ut.Failure.__init__ = excstore
- yield
- ut.Failure.__init__ = Failure__init__
- else:
- yield
-
-
-def check_testcase_implements_trial_reporter(done: List[int] = []) -> None:
- if done:
- return
- from zope.interface import classImplements
- from twisted.trial.itrial import IReporter
-
- classImplements(TestCaseFunction, IReporter)
- done.append(1)
-
-
-def _is_skipped(obj) -> bool:
- """Return True if the given object has been marked with @unittest.skip."""
- return bool(getattr(obj, "__unittest_skip__", False))
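
A brief sketch of the kind of class the collection hook above targets: any unittest.TestCase subclass is wrapped in UnitTestCase, and each test method becomes a TestCaseFunction item. The class below is hypothetical.

    import unittest

    class TestExample(unittest.TestCase):
        def setUp(self) -> None:
            self.value = 41

        def test_increment(self) -> None:
            self.assertEqual(self.value + 1, 42)

        @unittest.skip("handled through addSkip() above")
        def test_skipped(self) -> None:
            self.fail("never runs")
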
diff --git a/contrib/python/pytest/py3/_pytest/unraisableexception.py b/contrib/python/pytest/py3/_pytest/unraisableexception.py
deleted file mode 100644
index fcb5d8237c..0000000000
--- a/contrib/python/pytest/py3/_pytest/unraisableexception.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import sys
-import traceback
-import warnings
-from types import TracebackType
-from typing import Any
-from typing import Callable
-from typing import Generator
-from typing import Optional
-from typing import Type
-
-import pytest
-
-
-# Copied from cpython/Lib/test/support/__init__.py, with modifications.
-class catch_unraisable_exception:
- """Context manager catching unraisable exception using sys.unraisablehook.
-
- Storing the exception value (cm.unraisable.exc_value) creates a reference
- cycle. The reference cycle is broken explicitly when the context manager
- exits.
-
- Storing the object (cm.unraisable.object) can resurrect it if it is set to
- an object which is being finalized. Exiting the context manager clears the
- stored object.
-
- Usage:
- with catch_unraisable_exception() as cm:
- # code creating an "unraisable exception"
- ...
- # check the unraisable exception: use cm.unraisable
- ...
- # cm.unraisable attribute no longer exists at this point
- # (to break a reference cycle)
- """
-
- def __init__(self) -> None:
- self.unraisable: Optional["sys.UnraisableHookArgs"] = None
- self._old_hook: Optional[Callable[["sys.UnraisableHookArgs"], Any]] = None
-
- def _hook(self, unraisable: "sys.UnraisableHookArgs") -> None:
- # Storing unraisable.object can resurrect an object which is being
- # finalized. Storing unraisable.exc_value creates a reference cycle.
- self.unraisable = unraisable
-
- def __enter__(self) -> "catch_unraisable_exception":
- self._old_hook = sys.unraisablehook
- sys.unraisablehook = self._hook
- return self
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- assert self._old_hook is not None
- sys.unraisablehook = self._old_hook
- self._old_hook = None
- del self.unraisable
-
-
-def unraisable_exception_runtest_hook() -> Generator[None, None, None]:
- with catch_unraisable_exception() as cm:
- yield
- if cm.unraisable:
- if cm.unraisable.err_msg is not None:
- err_msg = cm.unraisable.err_msg
- else:
- err_msg = "Exception ignored in"
- msg = f"{err_msg}: {cm.unraisable.object!r}\n\n"
- msg += "".join(
- traceback.format_exception(
- cm.unraisable.exc_type,
- cm.unraisable.exc_value,
- cm.unraisable.exc_traceback,
- )
- )
- warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_setup() -> Generator[None, None, None]:
- yield from unraisable_exception_runtest_hook()
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_call() -> Generator[None, None, None]:
- yield from unraisable_exception_runtest_hook()
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_teardown() -> Generator[None, None, None]:
- yield from unraisable_exception_runtest_hook()
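
A hedged sketch of the context manager deleted above (Python 3.8+, where sys.unraisablehook exists); the import from the private module and the Broken class are illustrative. The instance is finalized immediately on CPython, which triggers the hook.

    from _pytest.unraisableexception import catch_unraisable_exception

    class Broken:
        def __del__(self) -> None:
            raise RuntimeError("failed to finalize")

    with catch_unraisable_exception() as cm:
        Broken()  # destructor raises; CPython reports it as "unraisable"
        assert cm.unraisable is not None
        assert cm.unraisable.exc_type is RuntimeError
    # cm.unraisable is deleted on exit to break the reference cycle.
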
diff --git a/contrib/python/pytest/py3/_pytest/warning_types.py b/contrib/python/pytest/py3/_pytest/warning_types.py
deleted file mode 100644
index ac79bb53ac..0000000000
--- a/contrib/python/pytest/py3/_pytest/warning_types.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from typing import Any
-from typing import Generic
-from typing import Type
-from typing import TypeVar
-
-import attr
-
-from _pytest.compat import final
-
-
-class PytestWarning(UserWarning):
- """Base class for all warnings emitted by pytest."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestAssertRewriteWarning(PytestWarning):
- """Warning emitted by the pytest assert rewrite module."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestCacheWarning(PytestWarning):
- """Warning emitted by the cache plugin in various situations."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestConfigWarning(PytestWarning):
- """Warning emitted for configuration issues."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestCollectionWarning(PytestWarning):
- """Warning emitted when pytest is not able to collect a file or symbol in a module."""
-
- __module__ = "pytest"
-
-
-class PytestDeprecationWarning(PytestWarning, DeprecationWarning):
- """Warning class for features that will be removed in a future version."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestRemovedIn8Warning(PytestDeprecationWarning):
- """Warning class for features that will be removed in pytest 8."""
-
- __module__ = "pytest"
-
-
-@final
-class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
- """Warning category used to denote experiments in pytest.
-
- Use sparingly as the API might change or even be removed completely in a
- future version.
- """
-
- __module__ = "pytest"
-
- @classmethod
- def simple(cls, apiname: str) -> "PytestExperimentalApiWarning":
- return cls(
- "{apiname} is an experimental api that may change over time".format(
- apiname=apiname
- )
- )
-
-
-@final
-class PytestUnhandledCoroutineWarning(PytestWarning):
- """Warning emitted for an unhandled coroutine.
-
- A coroutine was encountered when collecting test functions, but was not
- handled by any async-aware plugin.
- Coroutine test functions are not natively supported.
- """
-
- __module__ = "pytest"
-
-
-@final
-class PytestUnknownMarkWarning(PytestWarning):
- """Warning emitted on use of unknown markers.
-
- See :ref:`mark` for details.
- """
-
- __module__ = "pytest"
-
-
-@final
-class PytestUnraisableExceptionWarning(PytestWarning):
- """An unraisable exception was reported.
-
- Unraisable exceptions are exceptions raised in :meth:`__del__ <object.__del__>`
- implementations and similar situations when the exception cannot be raised
- as normal.
- """
-
- __module__ = "pytest"
-
-
-@final
-class PytestUnhandledThreadExceptionWarning(PytestWarning):
- """An unhandled exception occurred in a :class:`~threading.Thread`.
-
- Such exceptions don't propagate normally.
- """
-
- __module__ = "pytest"
-
-
-_W = TypeVar("_W", bound=PytestWarning)
-
-
-@final
-@attr.s(auto_attribs=True)
-class UnformattedWarning(Generic[_W]):
- """A warning meant to be formatted during runtime.
-
- This is used to hold warnings that need to format their message at runtime,
- as opposed to a direct message.
- """
-
- category: Type["_W"]
- template: str
-
- def format(self, **kwargs: Any) -> _W:
- """Return an instance of the warning category, formatted with given kwargs."""
- return self.category(self.template.format(**kwargs))
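
A small sketch of how UnformattedWarning defers message formatting; the template text is made up, and importing from the private _pytest.warning_types module is an assumption for illustration.

    import pytest
    from _pytest.warning_types import UnformattedWarning

    DEFERRED = UnformattedWarning(
        pytest.PytestWarning, "plugin {name} took {duration:.1f}s to import"
    )

    # format() fills in the template and returns a concrete warning instance.
    warning = DEFERRED.format(name="myplugin", duration=2.5)
    assert isinstance(warning, pytest.PytestWarning)
    assert str(warning) == "plugin myplugin took 2.5s to import"
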
diff --git a/contrib/python/pytest/py3/_pytest/warnings.py b/contrib/python/pytest/py3/_pytest/warnings.py
deleted file mode 100644
index 4aaa944529..0000000000
--- a/contrib/python/pytest/py3/_pytest/warnings.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import sys
-import warnings
-from contextlib import contextmanager
-from typing import Generator
-from typing import Optional
-from typing import TYPE_CHECKING
-
-import pytest
-from _pytest.config import apply_warning_filters
-from _pytest.config import Config
-from _pytest.config import parse_warning_filter
-from _pytest.main import Session
-from _pytest.nodes import Item
-from _pytest.terminal import TerminalReporter
-
-if TYPE_CHECKING:
- from typing_extensions import Literal
-
-
-def pytest_configure(config: Config) -> None:
- config.addinivalue_line(
- "markers",
- "filterwarnings(warning): add a warning filter to the given test. "
- "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ",
- )
-
-
-@contextmanager
-def catch_warnings_for_item(
- config: Config,
- ihook,
- when: "Literal['config', 'collect', 'runtest']",
- item: Optional[Item],
-) -> Generator[None, None, None]:
- """Context manager that catches warnings generated in the contained execution block.
-
- ``item`` can be None if we are not in the context of an item execution.
-
- Each warning captured triggers the ``pytest_warning_recorded`` hook.
- """
- config_filters = config.getini("filterwarnings")
- cmdline_filters = config.known_args_namespace.pythonwarnings or []
- with warnings.catch_warnings(record=True) as log:
- # mypy can't infer that record=True means log is not None; help it.
- assert log is not None
-
- if not sys.warnoptions:
- # If the user is not explicitly configuring warning filters, show deprecation warnings by default (#2908).
- warnings.filterwarnings("always", category=DeprecationWarning)
- warnings.filterwarnings("always", category=PendingDeprecationWarning)
-
- apply_warning_filters(config_filters, cmdline_filters)
-
- # apply filters from "filterwarnings" marks
- nodeid = "" if item is None else item.nodeid
- if item is not None:
- for mark in item.iter_markers(name="filterwarnings"):
- for arg in mark.args:
- warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
-
- yield
-
- for warning_message in log:
- ihook.pytest_warning_recorded.call_historic(
- kwargs=dict(
- warning_message=warning_message,
- nodeid=nodeid,
- when=when,
- location=None,
- )
- )
-
-
-def warning_record_to_str(warning_message: warnings.WarningMessage) -> str:
- """Convert a warnings.WarningMessage to a string."""
- warn_msg = warning_message.message
- msg = warnings.formatwarning(
- str(warn_msg),
- warning_message.category,
- warning_message.filename,
- warning_message.lineno,
- warning_message.line,
- )
- if warning_message.source is not None:
- try:
- import tracemalloc
- except ImportError:
- pass
- else:
- tb = tracemalloc.get_object_traceback(warning_message.source)
- if tb is not None:
- formatted_tb = "\n".join(tb.format())
- # Use a leading newline to better separate the (large) traceback output
- # from the previous warning text.
- msg += f"\nObject allocated at:\n{formatted_tb}"
- else:
- # No need for a leading new line.
- url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings"
- msg += "Enable tracemalloc to get traceback where the object was allocated.\n"
- msg += f"See {url} for more info."
- return msg
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
- with catch_warnings_for_item(
- config=item.config, ihook=item.ihook, when="runtest", item=item
- ):
- yield
-
-
-@pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_collection(session: Session) -> Generator[None, None, None]:
- config = session.config
- with catch_warnings_for_item(
- config=config, ihook=config.hook, when="collect", item=None
- ):
- yield
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_terminal_summary(
- terminalreporter: TerminalReporter,
-) -> Generator[None, None, None]:
- config = terminalreporter.config
- with catch_warnings_for_item(
- config=config, ihook=config.hook, when="config", item=None
- ):
- yield
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_sessionfinish(session: Session) -> Generator[None, None, None]:
- config = session.config
- with catch_warnings_for_item(
- config=config, ihook=config.hook, when="config", item=None
- ):
- yield
-
-
-@pytest.hookimpl(hookwrapper=True)
-def pytest_load_initial_conftests(
- early_config: "Config",
-) -> Generator[None, None, None]:
- with catch_warnings_for_item(
- config=early_config, ihook=early_config.hook, when="config", item=None
- ):
- yield
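
Finally, a hedged sketch of the "filterwarnings" mark whose registration by pytest_configure() appears above; the test name and warning text are illustrative only.

    import warnings

    import pytest

    @pytest.mark.filterwarnings("error::DeprecationWarning")
    def test_deprecation_becomes_error():
        # Within this test the mark installs an "error" filter, so the
        # warning below is raised as an exception.
        with pytest.raises(DeprecationWarning):
            warnings.warn("old api", DeprecationWarning)
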