| author | monster <monster@ydb.tech> | 2022-07-07 14:41:37 +0300 |
| --- | --- | --- |
| committer | monster <monster@ydb.tech> | 2022-07-07 14:41:37 +0300 |
| commit | 06e5c21a835c0e923506c4ff27929f34e00761c2 (patch) | |
| tree | 75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/python/pytest/py2/_pytest | |
| parent | 03f024c4412e3aa613bb543cf1660176320ba8f4 (diff) | |
| download | ydb-06e5c21a835c0e923506c4ff27929f34e00761c2.tar.gz | |
fix ya.make
Diffstat (limited to 'contrib/python/pytest/py2/_pytest')
56 files changed, 0 insertions(+), 21991 deletions(-)
diff --git a/contrib/python/pytest/py2/_pytest/__init__.py b/contrib/python/pytest/py2/_pytest/__init__.py deleted file mode 100644 index 17cc20b615..0000000000 --- a/contrib/python/pytest/py2/_pytest/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -__all__ = ["__version__"] - -try: - from ._version import version as __version__ -except ImportError: - # broken installation, we don't even try - # unknown only works because we do poor mans version compare - __version__ = "unknown" diff --git a/contrib/python/pytest/py2/_pytest/_argcomplete.py b/contrib/python/pytest/py2/_pytest/_argcomplete.py deleted file mode 100644 index c6cf1d8fdd..0000000000 --- a/contrib/python/pytest/py2/_pytest/_argcomplete.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -"""allow bash-completion for argparse with argcomplete if installed -needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail -to find the magic string, so _ARGCOMPLETE env. var is never set, and -this does not need special code. - -Function try_argcomplete(parser) should be called directly before -the call to ArgumentParser.parse_args(). - -The filescompleter is what you normally would use on the positional -arguments specification, in order to get "dirname/" after "dirn<TAB>" -instead of the default "dirname ": - - optparser.add_argument(Config._file_or_dir, nargs='*' - ).completer=filescompleter - -Other, application specific, completers should go in the file -doing the add_argument calls as they need to be specified as .completer -attributes as well. (If argcomplete is not installed, the function the -attribute points to will not be used). - -SPEEDUP -======= -The generic argcomplete script for bash-completion -(/etc/bash_completion.d/python-argcomplete.sh ) -uses a python program to determine startup script generated by pip. -You can speed up completion somewhat by changing this script to include - # PYTHON_ARGCOMPLETE_OK -so the the python-argcomplete-check-easy-install-script does not -need to be called to find the entry point of the code and see if that is -marked with PYTHON_ARGCOMPLETE_OK - -INSTALL/DEBUGGING -================= -To include this support in another application that has setup.py generated -scripts: -- add the line: - # PYTHON_ARGCOMPLETE_OK - near the top of the main python entry point -- include in the file calling parse_args(): - from _argcomplete import try_argcomplete, filescompleter - , call try_argcomplete just before parse_args(), and optionally add - filescompleter to the positional arguments' add_argument() -If things do not work right away: -- switch on argcomplete debugging with (also helpful when doing custom - completers): - export _ARC_DEBUG=1 -- run: - python-argcomplete-check-easy-install-script $(which appname) - echo $? - will echo 0 if the magic line has been found, 1 if not -- sometimes it helps to find early on errors using: - _ARGCOMPLETE=1 _ARC_DEBUG=1 appname - which should throw a KeyError: 'COMPLINE' (which is properly set by the - global argcomplete script). 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import sys -from glob import glob - - -class FastFilesCompleter(object): - "Fast file completer class" - - def __init__(self, directories=True): - self.directories = directories - - def __call__(self, prefix, **kwargs): - """only called on non option completions""" - if os.path.sep in prefix[1:]: - prefix_dir = len(os.path.dirname(prefix) + os.path.sep) - else: - prefix_dir = 0 - completion = [] - globbed = [] - if "*" not in prefix and "?" not in prefix: - # we are on unix, otherwise no bash - if not prefix or prefix[-1] == os.path.sep: - globbed.extend(glob(prefix + ".*")) - prefix += "*" - globbed.extend(glob(prefix)) - for x in sorted(globbed): - if os.path.isdir(x): - x += "/" - # append stripping the prefix (like bash, not like compgen) - completion.append(x[prefix_dir:]) - return completion - - -if os.environ.get("_ARGCOMPLETE"): - try: - import argcomplete.completers - except ImportError: - sys.exit(-1) - filescompleter = FastFilesCompleter() - - def try_argcomplete(parser): - argcomplete.autocomplete(parser, always_complete_options=False) - - -else: - - def try_argcomplete(parser): - pass - - filescompleter = None diff --git a/contrib/python/pytest/py2/_pytest/_code/__init__.py b/contrib/python/pytest/py2/_pytest/_code/__init__.py deleted file mode 100644 index 1394b2b10e..0000000000 --- a/contrib/python/pytest/py2/_pytest/_code/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -""" python inspection/code generation API """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from .code import Code # noqa -from .code import ExceptionInfo # noqa -from .code import filter_traceback # noqa -from .code import Frame # noqa -from .code import getrawcode # noqa -from .code import Traceback # noqa -from .source import compile_ as compile # noqa -from .source import getfslineno # noqa -from .source import Source # noqa diff --git a/contrib/python/pytest/py2/_pytest/_code/_py2traceback.py b/contrib/python/pytest/py2/_pytest/_code/_py2traceback.py deleted file mode 100644 index faacc02166..0000000000 --- a/contrib/python/pytest/py2/_pytest/_code/_py2traceback.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# copied from python-2.7.3's traceback.py -# CHANGES: -# - some_str is replaced, trying to create unicode strings -# -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import types - -from six import text_type - - -def format_exception_only(etype, value): - """Format the exception part of a traceback. - - The arguments are the exception type and value such as given by - sys.last_type and sys.last_value. The return value is a list of - strings, each ending in a newline. - - Normally, the list contains a single string; however, for - SyntaxError exceptions, it contains several lines that (when - printed) display detailed information about where the syntax - error occurred. - - The message indicating which exception occurred is always the last - string in the list. 
- - """ - - # An instance should not have a meaningful value parameter, but - # sometimes does, particularly for string exceptions, such as - # >>> raise string1, string2 # deprecated - # - # Clear these out first because issubtype(string1, SyntaxError) - # would throw another exception and mask the original problem. - if ( - isinstance(etype, BaseException) - or isinstance(etype, types.InstanceType) - or etype is None - or type(etype) is str - ): - return [_format_final_exc_line(etype, value)] - - stype = etype.__name__ - - if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] - - # It was a syntax error; show exactly where the problem was found. - lines = [] - try: - msg, (filename, lineno, offset, badline) = value.args - except Exception: - pass - else: - filename = filename or "<string>" - lines.append(' File "{}", line {}\n'.format(filename, lineno)) - if badline is not None: - if isinstance(badline, bytes): # python 2 only - badline = badline.decode("utf-8", "replace") - lines.append(" {}\n".format(badline.strip())) - if offset is not None: - caretspace = badline.rstrip("\n")[:offset].lstrip() - # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c.isspace() and c or " ") for c in caretspace) - # only three spaces to account for offset1 == pos 0 - lines.append(" {}^\n".format("".join(caretspace))) - value = msg - - lines.append(_format_final_exc_line(stype, value)) - return lines - - -def _format_final_exc_line(etype, value): - """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) - if value is None or not valuestr: - line = "{}\n".format(etype) - else: - line = "{}: {}\n".format(etype, valuestr) - return line - - -def _some_str(value): - try: - return text_type(value) - except Exception: - try: - return bytes(value).decode("UTF-8", "replace") - except Exception: - pass - return "<unprintable {} object>".format(type(value).__name__) diff --git a/contrib/python/pytest/py2/_pytest/_code/code.py b/contrib/python/pytest/py2/_pytest/_code/code.py deleted file mode 100644 index 175d6fda01..0000000000 --- a/contrib/python/pytest/py2/_pytest/_code/code.py +++ /dev/null @@ -1,1093 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import inspect -import re -import sys -import traceback -from inspect import CO_VARARGS -from inspect import CO_VARKEYWORDS -from weakref import ref - -import attr -import pluggy -import py -from six import text_type - -import _pytest -from _pytest._io.saferepr import safeformat -from _pytest._io.saferepr import saferepr -from _pytest.compat import _PY2 -from _pytest.compat import _PY3 -from _pytest.compat import PY35 -from _pytest.compat import safe_str - -if _PY3: - from traceback import format_exception_only -else: - from ._py2traceback import format_exception_only - - -class Code(object): - """ wrapper around Python code objects """ - - def __init__(self, rawcode): - if not hasattr(rawcode, "co_filename"): - rawcode = getrawcode(rawcode) - try: - self.filename = rawcode.co_filename - self.firstlineno = rawcode.co_firstlineno - 1 - self.name = rawcode.co_name - except AttributeError: - raise TypeError("not a code object: %r" % (rawcode,)) - self.raw = rawcode - - def __eq__(self, other): - return self.raw == other.raw - - __hash__ = None - - def __ne__(self, other): - return not self == other - - @property - def path(self): - """ return a path object 
pointing to source code (note that it - might not point to an actually existing file). """ - try: - p = py.path.local(self.raw.co_filename) - # maybe don't try this checking - if not p.check(): - raise OSError("py.path check failed.") - except OSError: - # XXX maybe try harder like the weird logic - # in the standard lib [linecache.updatecache] does? - p = self.raw.co_filename - - return p - - @property - def fullsource(self): - """ return a _pytest._code.Source object for the full source file of the code - """ - from _pytest._code import source - - full, _ = source.findsource(self.raw) - return full - - def source(self): - """ return a _pytest._code.Source object for the code object's source only - """ - # return source only for that part of code - import _pytest._code - - return _pytest._code.Source(self.raw) - - def getargs(self, var=False): - """ return a tuple with the argument names for the code object - - if 'var' is set True also return the names of the variable and - keyword arguments when present - """ - # handfull shortcut for getting args - raw = self.raw - argcount = raw.co_argcount - if var: - argcount += raw.co_flags & CO_VARARGS - argcount += raw.co_flags & CO_VARKEYWORDS - return raw.co_varnames[:argcount] - - -class Frame(object): - """Wrapper around a Python frame holding f_locals and f_globals - in which expressions can be evaluated.""" - - def __init__(self, frame): - self.lineno = frame.f_lineno - 1 - self.f_globals = frame.f_globals - self.f_locals = frame.f_locals - self.raw = frame - self.code = Code(frame.f_code) - - @property - def statement(self): - """ statement this frame is at """ - import _pytest._code - - if self.code.fullsource is None: - return _pytest._code.Source("") - return self.code.fullsource.getstatement(self.lineno) - - def eval(self, code, **vars): - """ evaluate 'code' in the frame - - 'vars' are optional additional local variables - - returns the result of the evaluation - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - return eval(code, self.f_globals, f_locals) - - def exec_(self, code, **vars): - """ exec 'code' in the frame - - 'vars' are optiona; additional local variables - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - exec(code, self.f_globals, f_locals) - - def repr(self, object): - """ return a 'safe' (non-recursive, one-line) string repr for 'object' - """ - return saferepr(object) - - def is_true(self, object): - return object - - def getargs(self, var=False): - """ return a list of tuples (name, value) for all arguments - - if 'var' is set True also include the variable and keyword - arguments when present - """ - retval = [] - for arg in self.code.getargs(var): - try: - retval.append((arg, self.f_locals[arg])) - except KeyError: - pass # this can occur when using Psyco - return retval - - -class TracebackEntry(object): - """ a single entry in a traceback """ - - _repr_style = None - exprinfo = None - - def __init__(self, rawentry, excinfo=None): - self._excinfo = excinfo - self._rawentry = rawentry - self.lineno = rawentry.tb_lineno - 1 - - def set_repr_style(self, mode): - assert mode in ("short", "long") - self._repr_style = mode - - @property - def frame(self): - import _pytest._code - - return _pytest._code.Frame(self._rawentry.tb_frame) - - @property - def relline(self): - return self.lineno - self.frame.code.firstlineno - - def __repr__(self): - return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1) - - @property - def statement(self): - """ _pytest._code.Source object for 
the current statement """ - source = self.frame.code.fullsource - return source.getstatement(self.lineno) - - @property - def path(self): - """ path to the source code """ - return self.frame.code.path - - def getlocals(self): - return self.frame.f_locals - - locals = property(getlocals, None, None, "locals of underlaying frame") - - def getfirstlinesource(self): - # on Jython this firstlineno can be -1 apparently - return max(self.frame.code.firstlineno, 0) - - def getsource(self, astcache=None): - """ return failing source code. """ - # we use the passed in astcache to not reparse asttrees - # within exception info printing - from _pytest._code.source import getstatementrange_ast - - source = self.frame.code.fullsource - if source is None: - return None - key = astnode = None - if astcache is not None: - key = self.frame.code.path - if key is not None: - astnode = astcache.get(key, None) - start = self.getfirstlinesource() - try: - astnode, _, end = getstatementrange_ast( - self.lineno, source, astnode=astnode - ) - except SyntaxError: - end = self.lineno + 1 - else: - if key is not None: - astcache[key] = astnode - return source[start:end] - - source = property(getsource) - - def ishidden(self): - """ return True if the current frame has a var __tracebackhide__ - resolving to True. - - If __tracebackhide__ is a callable, it gets called with the - ExceptionInfo instance and can decide whether to hide the traceback. - - mostly for internal use - """ - f = self.frame - tbh = f.f_locals.get( - "__tracebackhide__", f.f_globals.get("__tracebackhide__", False) - ) - if tbh and callable(tbh): - return tbh(None if self._excinfo is None else self._excinfo()) - return tbh - - def __str__(self): - try: - fn = str(self.path) - except py.error.Error: - fn = "???" - name = self.frame.code.name - try: - line = str(self.statement).lstrip() - except KeyboardInterrupt: - raise - except: # noqa - line = "???" - return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) - - def name(self): - return self.frame.code.raw.co_name - - name = property(name, None, None, "co_name of underlaying code") - - -class Traceback(list): - """ Traceback objects encapsulate and offer higher level - access to Traceback entries. - """ - - Entry = TracebackEntry - - def __init__(self, tb, excinfo=None): - """ initialize from given python traceback object and ExceptionInfo """ - self._excinfo = excinfo - if hasattr(tb, "tb_next"): - - def f(cur): - while cur is not None: - yield self.Entry(cur, excinfo=excinfo) - cur = cur.tb_next - - list.__init__(self, f(tb)) - else: - list.__init__(self, tb) - - def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): - """ return a Traceback instance wrapping part of this Traceback - - by provding any combination of path, lineno and firstlineno, the - first frame to start the to-be-returned traceback is determined - - this allows cutting the first part of a Traceback instance e.g. 
- for formatting reasons (removing some uninteresting bits that deal - with handling of the exception/traceback) - """ - for x in self: - code = x.frame.code - codepath = code.path - if ( - (path is None or codepath == path) - and ( - excludepath is None - or not hasattr(codepath, "relto") - or not codepath.relto(excludepath) - ) - and (lineno is None or x.lineno == lineno) - and (firstlineno is None or x.frame.code.firstlineno == firstlineno) - ): - return Traceback(x._rawentry, self._excinfo) - return self - - def __getitem__(self, key): - val = super(Traceback, self).__getitem__(key) - if isinstance(key, type(slice(0))): - val = self.__class__(val) - return val - - def filter(self, fn=lambda x: not x.ishidden()): - """ return a Traceback instance with certain items removed - - fn is a function that gets a single argument, a TracebackEntry - instance, and should return True when the item should be added - to the Traceback, False when not - - by default this removes all the TracebackEntries which are hidden - (see ishidden() above) - """ - return Traceback(filter(fn, self), self._excinfo) - - def getcrashentry(self): - """ return last non-hidden traceback entry that lead - to the exception of a traceback. - """ - for i in range(-1, -len(self) - 1, -1): - entry = self[i] - if not entry.ishidden(): - return entry - return self[-1] - - def recursionindex(self): - """ return the index of the frame/TracebackEntry where recursion - originates if appropriate, None if no recursion occurred - """ - cache = {} - for i, entry in enumerate(self): - # id for the code.raw is needed to work around - # the strange metaprogramming in the decorator lib from pypi - # which generates code objects that have hash/value equality - # XXX needs a test - key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno - # print "checking for recursion at", key - values = cache.setdefault(key, []) - if values: - f = entry.frame - loc = f.f_locals - for otherloc in values: - if f.is_true( - f.eval( - co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc, - ) - ): - return i - values.append(entry.frame.f_locals) - return None - - -co_equal = compile( - "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" -) - - -@attr.s(repr=False) -class ExceptionInfo(object): - """ wraps sys.exc_info() objects and offers - help for navigating the traceback. - """ - - _assert_start_repr = ( - "AssertionError(u'assert " if _PY2 else "AssertionError('assert " - ) - - _excinfo = attr.ib() - _striptext = attr.ib(default="") - _traceback = attr.ib(default=None) - - @classmethod - def from_current(cls, exprinfo=None): - """returns an ExceptionInfo matching the current traceback - - .. 
warning:: - - Experimental API - - - :param exprinfo: a text string helping to determine if we should - strip ``AssertionError`` from the output, defaults - to the exception message/``__str__()`` - """ - tup = sys.exc_info() - assert tup[0] is not None, "no current exception" - _striptext = "" - if exprinfo is None and isinstance(tup[1], AssertionError): - exprinfo = getattr(tup[1], "msg", None) - if exprinfo is None: - exprinfo = saferepr(tup[1]) - if exprinfo and exprinfo.startswith(cls._assert_start_repr): - _striptext = "AssertionError: " - - return cls(tup, _striptext) - - @classmethod - def for_later(cls): - """return an unfilled ExceptionInfo - """ - return cls(None) - - @property - def type(self): - """the exception class""" - return self._excinfo[0] - - @property - def value(self): - """the exception value""" - return self._excinfo[1] - - @property - def tb(self): - """the exception raw traceback""" - return self._excinfo[2] - - @property - def typename(self): - """the type name of the exception""" - return self.type.__name__ - - @property - def traceback(self): - """the traceback""" - if self._traceback is None: - self._traceback = Traceback(self.tb, excinfo=ref(self)) - return self._traceback - - @traceback.setter - def traceback(self, value): - self._traceback = value - - def __repr__(self): - if self._excinfo is None: - return "<ExceptionInfo for raises contextmanager>" - return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback)) - - def exconly(self, tryshort=False): - """ return the exception as a string - - when 'tryshort' resolves to True, and the exception is a - _pytest._code._AssertionError, only the actual exception part of - the exception representation is returned (so 'AssertionError: ' is - removed from the beginning) - """ - lines = format_exception_only(self.type, self.value) - text = "".join(lines) - text = text.rstrip() - if tryshort: - if text.startswith(self._striptext): - text = text[len(self._striptext) :] - return text - - def errisinstance(self, exc): - """ return True if the exception is an instance of exc """ - return isinstance(self.value, exc) - - def _getreprcrash(self): - exconly = self.exconly(tryshort=True) - entry = self.traceback.getcrashentry() - path, lineno = entry.frame.code.raw.co_filename, entry.lineno - return ReprFileLocation(path, lineno + 1, exconly) - - def getrepr( - self, - showlocals=False, - style="long", - abspath=False, - tbfilter=True, - funcargs=False, - truncate_locals=True, - chain=True, - ): - """ - Return str()able representation of this exception info. - - :param bool showlocals: - Show locals per traceback entry. - Ignored if ``style=="native"``. - - :param str style: long|short|no|native traceback style - - :param bool abspath: - If paths should be changed to absolute or left unchanged. - - :param bool tbfilter: - Hide entries that contain a local variable ``__tracebackhide__==True``. - Ignored if ``style=="native"``. - - :param bool funcargs: - Show fixtures ("funcargs" for legacy purposes) per traceback entry. - - :param bool truncate_locals: - With ``showlocals==True``, make sure locals can be safely represented as strings. - - :param bool chain: if chained exceptions in Python 3 should be shown. - - .. versionchanged:: 3.9 - - Added the ``chain`` parameter. 
- """ - if style == "native": - return ReprExceptionInfo( - ReprTracebackNative( - traceback.format_exception( - self.type, self.value, self.traceback[0]._rawentry - ) - ), - self._getreprcrash(), - ) - - fmt = FormattedExcinfo( - showlocals=showlocals, - style=style, - abspath=abspath, - tbfilter=tbfilter, - funcargs=funcargs, - truncate_locals=truncate_locals, - chain=chain, - ) - return fmt.repr_excinfo(self) - - def __str__(self): - if self._excinfo is None: - return repr(self) - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return str(loc) - - def __unicode__(self): - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return text_type(loc) - - def match(self, regexp): - """ - Check whether the regular expression 'regexp' is found in the string - representation of the exception using ``re.search``. If it matches - then True is returned (so that it is possible to write - ``assert excinfo.match()``). If it doesn't match an AssertionError is - raised. - """ - __tracebackhide__ = True - value = ( - text_type(self.value) if isinstance(regexp, text_type) else str(self.value) - ) - if not re.search(regexp, value): - raise AssertionError( - u"Pattern {!r} not found in {!r}".format(regexp, value) - ) - return True - - -@attr.s -class FormattedExcinfo(object): - """ presenting information about failing Functions and Generators. """ - - # for traceback entries - flow_marker = ">" - fail_marker = "E" - - showlocals = attr.ib(default=False) - style = attr.ib(default="long") - abspath = attr.ib(default=True) - tbfilter = attr.ib(default=True) - funcargs = attr.ib(default=False) - truncate_locals = attr.ib(default=True) - chain = attr.ib(default=True) - astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) - - def _getindent(self, source): - # figure out indent for given source - try: - s = str(source.getstatement(len(source) - 1)) - except KeyboardInterrupt: - raise - except: # noqa - try: - s = str(source[-1]) - except KeyboardInterrupt: - raise - except: # noqa - return 0 - return 4 + (len(s) - len(s.lstrip())) - - def _getentrysource(self, entry): - source = entry.getsource(self.astcache) - if source is not None: - source = source.deindent() - return source - - def repr_args(self, entry): - if self.funcargs: - args = [] - for argname, argvalue in entry.frame.getargs(var=True): - args.append((argname, saferepr(argvalue))) - return ReprFuncArgs(args) - - def get_source(self, source, line_index=-1, excinfo=None, short=False): - """ return formatted and marked up source lines. 
""" - import _pytest._code - - lines = [] - if source is None or line_index >= len(source.lines): - source = _pytest._code.Source("???") - line_index = 0 - if line_index < 0: - line_index += len(source) - space_prefix = " " - if short: - lines.append(space_prefix + source.lines[line_index].strip()) - else: - for line in source.lines[:line_index]: - lines.append(space_prefix + line) - lines.append(self.flow_marker + " " + source.lines[line_index]) - for line in source.lines[line_index + 1 :]: - lines.append(space_prefix + line) - if excinfo is not None: - indent = 4 if short else self._getindent(source) - lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) - return lines - - def get_exconly(self, excinfo, indent=4, markall=False): - lines = [] - indent = " " * indent - # get the real exception information out - exlines = excinfo.exconly(tryshort=True).split("\n") - failindent = self.fail_marker + indent[1:] - for line in exlines: - lines.append(failindent + line) - if not markall: - failindent = indent - return lines - - def repr_locals(self, locals): - if self.showlocals: - lines = [] - keys = [loc for loc in locals if loc[0] != "@"] - keys.sort() - for name in keys: - value = locals[name] - if name == "__builtins__": - lines.append("__builtins__ = <builtins>") - else: - # This formatting could all be handled by the - # _repr() function, which is only reprlib.Repr in - # disguise, so is very configurable. - if self.truncate_locals: - str_repr = saferepr(value) - else: - str_repr = safeformat(value) - # if len(str_repr) < 70 or not isinstance(value, - # (list, tuple, dict)): - lines.append("%-10s = %s" % (name, str_repr)) - # else: - # self._line("%-10s =\\" % (name,)) - # # XXX - # pprint.pprint(value, stream=self.excinfowriter) - return ReprLocals(lines) - - def repr_traceback_entry(self, entry, excinfo=None): - import _pytest._code - - source = self._getentrysource(entry) - if source is None: - source = _pytest._code.Source("???") - line_index = 0 - else: - # entry.getfirstlinesource() can be -1, should be 0 on jython - line_index = entry.lineno - max(entry.getfirstlinesource(), 0) - - lines = [] - style = entry._repr_style - if style is None: - style = self.style - if style in ("short", "long"): - short = style == "short" - reprargs = self.repr_args(entry) if not short else None - s = self.get_source(source, line_index, excinfo, short=short) - lines.extend(s) - if short: - message = "in %s" % (entry.name) - else: - message = excinfo and excinfo.typename or "" - path = self._makepath(entry.path) - filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) - localsrepr = None - if not short: - localsrepr = self.repr_locals(entry.locals) - return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) - if excinfo: - lines.extend(self.get_exconly(excinfo, indent=4)) - return ReprEntry(lines, None, None, None, style) - - def _makepath(self, path): - if not self.abspath: - try: - np = py.path.local().bestrelpath(path) - except OSError: - return path - if len(np) < len(str(path)): - path = np - return path - - def repr_traceback(self, excinfo): - traceback = excinfo.traceback - if self.tbfilter: - traceback = traceback.filter() - - if is_recursion_error(excinfo): - traceback, extraline = self._truncate_recursive_traceback(traceback) - else: - extraline = None - - last = traceback[-1] - entries = [] - for index, entry in enumerate(traceback): - einfo = (last == entry) and excinfo or None - reprentry = self.repr_traceback_entry(entry, einfo) - entries.append(reprentry) 
- return ReprTraceback(entries, extraline, style=self.style) - - def _truncate_recursive_traceback(self, traceback): - """ - Truncate the given recursive traceback trying to find the starting point - of the recursion. - - The detection is done by going through each traceback entry and finding the - point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. - - Handle the situation where the recursion process might raise an exception (for example - comparing numpy arrays using equality raises a TypeError), in which case we do our best to - warn the user of the error and show a limited traceback. - """ - try: - recursionindex = traceback.recursionindex() - except Exception as e: - max_frames = 10 - extraline = ( - "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" - " The following exception happened when comparing locals in the stack frame:\n" - " {exc_type}: {exc_msg}\n" - " Displaying first and last {max_frames} stack frames out of {total}." - ).format( - exc_type=type(e).__name__, - exc_msg=safe_str(e), - max_frames=max_frames, - total=len(traceback), - ) - traceback = traceback[:max_frames] + traceback[-max_frames:] - else: - if recursionindex is not None: - extraline = "!!! Recursion detected (same locals & position)" - traceback = traceback[: recursionindex + 1] - else: - extraline = None - - return traceback, extraline - - def repr_excinfo(self, excinfo): - if _PY2: - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - - return ReprExceptionInfo(reprtraceback, reprcrash) - else: - repr_chain = [] - e = excinfo.value - descr = None - seen = set() - while e is not None and id(e) not in seen: - seen.add(id(e)) - if excinfo: - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - else: - # fallback to native repr if the exception doesn't have a traceback: - # ExceptionInfo objects require a full traceback to work - reprtraceback = ReprTracebackNative( - traceback.format_exception(type(e), e, None) - ) - reprcrash = None - - repr_chain += [(reprtraceback, reprcrash, descr)] - if e.__cause__ is not None and self.chain: - e = e.__cause__ - excinfo = ( - ExceptionInfo((type(e), e, e.__traceback__)) - if e.__traceback__ - else None - ) - descr = "The above exception was the direct cause of the following exception:" - elif ( - e.__context__ is not None - and not e.__suppress_context__ - and self.chain - ): - e = e.__context__ - excinfo = ( - ExceptionInfo((type(e), e, e.__traceback__)) - if e.__traceback__ - else None - ) - descr = "During handling of the above exception, another exception occurred:" - else: - e = None - repr_chain.reverse() - return ExceptionChainRepr(repr_chain) - - -class TerminalRepr(object): - def __str__(self): - s = self.__unicode__() - if _PY2: - s = s.encode("utf-8") - return s - - def __unicode__(self): - # FYI this is called from pytest-xdist's serialization of exception - # information. 
- io = py.io.TextIO() - tw = py.io.TerminalWriter(file=io) - self.toterminal(tw) - return io.getvalue().strip() - - def __repr__(self): - return "<%s instance at %0x>" % (self.__class__, id(self)) - - -class ExceptionRepr(TerminalRepr): - def __init__(self): - self.sections = [] - - def addsection(self, name, content, sep="-"): - self.sections.append((name, content, sep)) - - def toterminal(self, tw): - for name, content, sep in self.sections: - tw.sep(sep, name) - tw.line(content) - - -class ExceptionChainRepr(ExceptionRepr): - def __init__(self, chain): - super(ExceptionChainRepr, self).__init__() - self.chain = chain - # reprcrash and reprtraceback of the outermost (the newest) exception - # in the chain - self.reprtraceback = chain[-1][0] - self.reprcrash = chain[-1][1] - - def toterminal(self, tw): - for element in self.chain: - element[0].toterminal(tw) - if element[2] is not None: - tw.line("") - tw.line(element[2], yellow=True) - super(ExceptionChainRepr, self).toterminal(tw) - - -class ReprExceptionInfo(ExceptionRepr): - def __init__(self, reprtraceback, reprcrash): - super(ReprExceptionInfo, self).__init__() - self.reprtraceback = reprtraceback - self.reprcrash = reprcrash - - def toterminal(self, tw): - self.reprtraceback.toterminal(tw) - super(ReprExceptionInfo, self).toterminal(tw) - - -class ReprTraceback(TerminalRepr): - entrysep = "_ " - - def __init__(self, reprentries, extraline, style): - self.reprentries = reprentries - self.extraline = extraline - self.style = style - - def toterminal(self, tw): - # the entries might have different styles - for i, entry in enumerate(self.reprentries): - if entry.style == "long": - tw.line("") - entry.toterminal(tw) - if i < len(self.reprentries) - 1: - next_entry = self.reprentries[i + 1] - if ( - entry.style == "long" - or entry.style == "short" - and next_entry.style == "long" - ): - tw.sep(self.entrysep) - - if self.extraline: - tw.line(self.extraline) - - -class ReprTracebackNative(ReprTraceback): - def __init__(self, tblines): - self.style = "native" - self.reprentries = [ReprEntryNative(tblines)] - self.extraline = None - - -class ReprEntryNative(TerminalRepr): - style = "native" - - def __init__(self, tblines): - self.lines = tblines - - def toterminal(self, tw): - tw.write("".join(self.lines)) - - -class ReprEntry(TerminalRepr): - def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): - self.lines = lines - self.reprfuncargs = reprfuncargs - self.reprlocals = reprlocals - self.reprfileloc = filelocrepr - self.style = style - - def toterminal(self, tw): - if self.style == "short": - self.reprfileloc.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - # tw.line("") - return - if self.reprfuncargs: - self.reprfuncargs.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - if self.reprlocals: - tw.line("") - self.reprlocals.toterminal(tw) - if self.reprfileloc: - if self.lines: - tw.line("") - self.reprfileloc.toterminal(tw) - - def __str__(self): - return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc) - - -class ReprFileLocation(TerminalRepr): - def __init__(self, path, lineno, message): - self.path = str(path) - self.lineno = lineno - self.message = message - - def toterminal(self, tw): - # filename and lineno output for each entry, - # using an output format that most editors unterstand - msg = self.message - i = msg.find("\n") - if i != -1: - msg = msg[:i] - 
tw.write(self.path, bold=True, red=True) - tw.line(":%s: %s" % (self.lineno, msg)) - - -class ReprLocals(TerminalRepr): - def __init__(self, lines): - self.lines = lines - - def toterminal(self, tw): - for line in self.lines: - tw.line(line) - - -class ReprFuncArgs(TerminalRepr): - def __init__(self, args): - self.args = args - - def toterminal(self, tw): - if self.args: - linesofar = "" - for name, value in self.args: - ns = "%s = %s" % (safe_str(name), safe_str(value)) - if len(ns) + len(linesofar) + 2 > tw.fullwidth: - if linesofar: - tw.line(linesofar) - linesofar = ns - else: - if linesofar: - linesofar += ", " + ns - else: - linesofar = ns - if linesofar: - tw.line(linesofar) - tw.line("") - - -def getrawcode(obj, trycall=True): - """ return code object for given function. """ - try: - return obj.__code__ - except AttributeError: - obj = getattr(obj, "im_func", obj) - obj = getattr(obj, "func_code", obj) - obj = getattr(obj, "f_code", obj) - obj = getattr(obj, "__code__", obj) - if trycall and not hasattr(obj, "co_firstlineno"): - if hasattr(obj, "__call__") and not inspect.isclass(obj): - x = getrawcode(obj.__call__, trycall=False) - if hasattr(x, "co_firstlineno"): - return x - return obj - - -if PY35: # RecursionError introduced in 3.5 - - def is_recursion_error(excinfo): - return excinfo.errisinstance(RecursionError) # noqa - - -else: - - def is_recursion_error(excinfo): - if not excinfo.errisinstance(RuntimeError): - return False - try: - return "maximum recursion depth exceeded" in str(excinfo.value) - except UnicodeError: - return False - - -# relative paths that we use to filter traceback entries from appearing to the user; -# see filter_traceback -# note: if we need to add more paths than what we have now we should probably use a list -# for better maintenance - -_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc")) -# pluggy is either a package or a single module depending on the version -if _PLUGGY_DIR.basename == "__init__.py": - _PLUGGY_DIR = _PLUGGY_DIR.dirpath() -_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath() -_PY_DIR = py.path.local(py.__file__).dirpath() - - -def filter_traceback(entry): - """Return True if a TracebackEntry instance should be removed from tracebacks: - * dynamically generated code (no code to show up for it); - * internal traceback from pytest or its internal libraries, py and pluggy. - """ - # entry.path might sometimes return a str object when the entry - # points to dynamically generated code - # see https://bitbucket.org/pytest-dev/py/issues/71 - raw_filename = entry.frame.code.raw.co_filename - is_generated = "<" in raw_filename and ">" in raw_filename - if is_generated: - return False - # entry.path might point to a non-existing file, in which case it will - # also return a str object. 
see #1133 - p = py.path.local(entry.path) - return ( - not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR) - ) diff --git a/contrib/python/pytest/py2/_pytest/_code/source.py b/contrib/python/pytest/py2/_pytest/_code/source.py deleted file mode 100644 index b35e97b9ce..0000000000 --- a/contrib/python/pytest/py2/_pytest/_code/source.py +++ /dev/null @@ -1,324 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import ast -import inspect -import linecache -import sys -import textwrap -import tokenize -import warnings -from ast import PyCF_ONLY_AST as _AST_FLAG -from bisect import bisect_right - -import py -import six - - -class Source(object): - """ an immutable object holding a source code fragment, - possibly deindenting it. - """ - - _compilecounter = 0 - - def __init__(self, *parts, **kwargs): - self.lines = lines = [] - de = kwargs.get("deindent", True) - for part in parts: - if not part: - partlines = [] - elif isinstance(part, Source): - partlines = part.lines - elif isinstance(part, (tuple, list)): - partlines = [x.rstrip("\n") for x in part] - elif isinstance(part, six.string_types): - partlines = part.split("\n") - else: - partlines = getsource(part, deindent=de).lines - if de: - partlines = deindent(partlines) - lines.extend(partlines) - - def __eq__(self, other): - try: - return self.lines == other.lines - except AttributeError: - if isinstance(other, str): - return str(self) == other - return False - - __hash__ = None - - def __getitem__(self, key): - if isinstance(key, int): - return self.lines[key] - else: - if key.step not in (None, 1): - raise IndexError("cannot slice a Source with a step") - newsource = Source() - newsource.lines = self.lines[key.start : key.stop] - return newsource - - def __len__(self): - return len(self.lines) - - def strip(self): - """ return new source object with trailing - and leading blank lines removed. - """ - start, end = 0, len(self) - while start < end and not self.lines[start].strip(): - start += 1 - while end > start and not self.lines[end - 1].strip(): - end -= 1 - source = Source() - source.lines[:] = self.lines[start:end] - return source - - def putaround(self, before="", after="", indent=" " * 4): - """ return a copy of the source object with - 'before' and 'after' wrapped around it. - """ - before = Source(before) - after = Source(after) - newsource = Source() - lines = [(indent + line) for line in self.lines] - newsource.lines = before.lines + lines + after.lines - return newsource - - def indent(self, indent=" " * 4): - """ return a copy of the source object with - all lines indented by the given indent-string. - """ - newsource = Source() - newsource.lines = [(indent + line) for line in self.lines] - return newsource - - def getstatement(self, lineno): - """ return Source statement which contains the - given linenumber (counted from 0). - """ - start, end = self.getstatementrange(lineno) - return self[start:end] - - def getstatementrange(self, lineno): - """ return (start, end) tuple which spans the minimal - statement region which containing the given lineno. 
- """ - if not (0 <= lineno < len(self)): - raise IndexError("lineno out of range") - ast, start, end = getstatementrange_ast(lineno, self) - return start, end - - def deindent(self): - """return a new source object deindented.""" - newsource = Source() - newsource.lines[:] = deindent(self.lines) - return newsource - - def isparseable(self, deindent=True): - """ return True if source is parseable, heuristically - deindenting it by default. - """ - if deindent: - source = str(self.deindent()) - else: - source = str(self) - try: - ast.parse(source) - except (SyntaxError, ValueError, TypeError): - return False - else: - return True - - def __str__(self): - return "\n".join(self.lines) - - def compile( - self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None - ): - """ return compiled code object. if filename is None - invent an artificial filename which displays - the source/line position of the caller frame. - """ - if not filename or py.path.local(filename).check(file=0): - if _genframe is None: - _genframe = sys._getframe(1) # the caller - fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno - base = "<%d-codegen " % self._compilecounter - self.__class__._compilecounter += 1 - if not filename: - filename = base + "%s:%d>" % (fn, lineno) - else: - filename = base + "%r %s:%d>" % (filename, fn, lineno) - source = "\n".join(self.lines) + "\n" - try: - co = compile(source, filename, mode, flag) - except SyntaxError: - ex = sys.exc_info()[1] - # re-represent syntax errors from parsing python strings - msglines = self.lines[: ex.lineno] - if ex.offset: - msglines.append(" " * ex.offset + "^") - msglines.append("(code was compiled probably from here: %s)" % filename) - newex = SyntaxError("\n".join(msglines)) - newex.offset = ex.offset - newex.lineno = ex.lineno - newex.text = ex.text - raise newex - else: - if flag & _AST_FLAG: - return co - lines = [(x + "\n") for x in self.lines] - linecache.cache[filename] = (1, None, lines, filename) - return co - - -# -# public API shortcut functions -# - - -def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): - """ compile the given source to a raw code object, - and maintain an internal cache which allows later - retrieval of the source code for the code object - and any recursively created code objects. - """ - if isinstance(source, ast.AST): - # XXX should Source support having AST? - return compile(source, filename, mode, flags, dont_inherit) - _genframe = sys._getframe(1) # the caller - s = Source(source) - co = s.compile(filename, mode, flags, _genframe=_genframe) - return co - - -def getfslineno(obj): - """ Return source location (path, lineno) for the given object. - If the source cannot be determined return ("", -1). - - The line number is 0-based. 
- """ - from .code import Code - - try: - code = Code(obj) - except TypeError: - try: - fn = inspect.getsourcefile(obj) or inspect.getfile(obj) - except TypeError: - return "", -1 - - fspath = fn and py.path.local(fn) or None - lineno = -1 - if fspath: - try: - _, lineno = findsource(obj) - except IOError: - pass - else: - fspath = code.path - lineno = code.firstlineno - assert isinstance(lineno, int) - return fspath, lineno - - -# -# helper functions -# - - -def findsource(obj): - try: - sourcelines, lineno = inspect.findsource(obj) - except Exception: - return None, -1 - source = Source() - source.lines = [line.rstrip() for line in sourcelines] - return source, lineno - - -def getsource(obj, **kwargs): - from .code import getrawcode - - obj = getrawcode(obj) - try: - strsrc = inspect.getsource(obj) - except IndentationError: - strsrc = '"Buggy python version consider upgrading, cannot get source"' - assert isinstance(strsrc, str) - return Source(strsrc, **kwargs) - - -def deindent(lines): - return textwrap.dedent("\n".join(lines)).splitlines() - - -def get_statement_startend2(lineno, node): - import ast - - # flatten all statements and except handlers into one lineno-list - # AST's line numbers start indexing at 1 - values = [] - for x in ast.walk(node): - if isinstance(x, (ast.stmt, ast.ExceptHandler)): - values.append(x.lineno - 1) - for name in ("finalbody", "orelse"): - val = getattr(x, name, None) - if val: - # treat the finally/orelse part as its own statement - values.append(val[0].lineno - 1 - 1) - values.sort() - insert_index = bisect_right(values, lineno) - start = values[insert_index - 1] - if insert_index >= len(values): - end = None - else: - end = values[insert_index] - return start, end - - -def getstatementrange_ast(lineno, source, assertion=False, astnode=None): - if astnode is None: - content = str(source) - # See #4260: - # don't produce duplicate warnings when compiling source to find ast - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - astnode = compile(content, "source", "exec", _AST_FLAG) - - start, end = get_statement_startend2(lineno, astnode) - # we need to correct the end: - # - ast-parsing strips comments - # - there might be empty lines - # - we might have lesser indented code blocks at the end - if end is None: - end = len(source.lines) - - if end > start + 1: - # make sure we don't span differently indented code blocks - # by using the BlockFinder helper used which inspect.getsource() uses itself - block_finder = inspect.BlockFinder() - # if we start with an indented line, put blockfinder to "started" mode - block_finder.started = source.lines[start][0].isspace() - it = ((x + "\n") for x in source.lines[start:end]) - try: - for tok in tokenize.generate_tokens(lambda: next(it)): - block_finder.tokeneater(*tok) - except (inspect.EndOfBlock, IndentationError): - end = block_finder.last + start - except Exception: - pass - - # the end might still point to a comment or empty line, correct it - while end: - line = source.lines[end - 1].lstrip() - if line.startswith("#") or not line: - end -= 1 - else: - break - return astnode, start, end diff --git a/contrib/python/pytest/py2/_pytest/_io/__init__.py b/contrib/python/pytest/py2/_pytest/_io/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/contrib/python/pytest/py2/_pytest/_io/__init__.py +++ /dev/null diff --git a/contrib/python/pytest/py2/_pytest/_io/saferepr.py b/contrib/python/pytest/py2/_pytest/_io/saferepr.py deleted file mode 100644 index 9b412dccad..0000000000 --- 
a/contrib/python/pytest/py2/_pytest/_io/saferepr.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -import pprint - -from six.moves import reprlib - - -def _call_and_format_exception(call, x, *args): - try: - # Try the vanilla repr and make sure that the result is a string - return call(x, *args) - except Exception as exc: - exc_name = type(exc).__name__ - try: - exc_info = str(exc) - except Exception: - exc_info = "unknown" - return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( - exc_name, - exc_info, - x.__class__.__name__, - id(x), - ) - - -class SafeRepr(reprlib.Repr): - """subclass of repr.Repr that limits the resulting size of repr() - and includes information on exceptions raised during the call. - """ - - def repr(self, x): - return self._callhelper(reprlib.Repr.repr, self, x) - - def repr_unicode(self, x, level): - # Strictly speaking wrong on narrow builds - def repr(u): - if "'" not in u: - return u"'%s'" % u - elif '"' not in u: - return u'"%s"' % u - else: - return u"'%s'" % u.replace("'", r"\'") - - s = repr(x[: self.maxstring]) - if len(s) > self.maxstring: - i = max(0, (self.maxstring - 3) // 2) - j = max(0, self.maxstring - 3 - i) - s = repr(x[:i] + x[len(x) - j :]) - s = s[:i] + "..." + s[len(s) - j :] - return s - - def repr_instance(self, x, level): - return self._callhelper(repr, x) - - def _callhelper(self, call, x, *args): - s = _call_and_format_exception(call, x, *args) - if len(s) > self.maxsize: - i = max(0, (self.maxsize - 3) // 2) - j = max(0, self.maxsize - 3 - i) - s = s[:i] + "..." + s[len(s) - j :] - return s - - -def safeformat(obj): - """return a pretty printed string for the given object. - Failing __repr__ functions of user instances will be represented - with a short exception info. - """ - return _call_and_format_exception(pprint.pformat, obj) - - -def saferepr(obj, maxsize=240): - """return a size-limited safe repr-string for the given object. - Failing __repr__ functions of user instances will be represented - with a short exception info and 'saferepr' generally takes - care to never raise exceptions itself. This function is a wrapper - around the Repr/reprlib functionality of the standard 2.6 lib. - """ - # review exception handling - srepr = SafeRepr() - srepr.maxstring = maxsize - srepr.maxsize = maxsize - srepr.maxother = 160 - return srepr.repr(obj) diff --git a/contrib/python/pytest/py2/_pytest/_version.py b/contrib/python/pytest/py2/_pytest/_version.py deleted file mode 100644 index 3d587586fc..0000000000 --- a/contrib/python/pytest/py2/_pytest/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = '4.6.11' diff --git a/contrib/python/pytest/py2/_pytest/assertion/__init__.py b/contrib/python/pytest/py2/_pytest/assertion/__init__.py deleted file mode 100644 index 6b6abb863a..0000000000 --- a/contrib/python/pytest/py2/_pytest/assertion/__init__.py +++ /dev/null @@ -1,156 +0,0 @@ -# -*- coding: utf-8 -*- -""" -support for presenting detailed information in failing assertions. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -import six - -from _pytest.assertion import rewrite -from _pytest.assertion import truncate -from _pytest.assertion import util - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--assert", - action="store", - dest="assertmode", - choices=("rewrite", "plain"), - default="rewrite", - metavar="MODE", - help="""Control assertion debugging tools. 'plain' - performs no assertion debugging. 'rewrite' - (the default) rewrites assert statements in - test modules on import to provide assert - expression information.""", - ) - - -def register_assert_rewrite(*names): - """Register one or more module names to be rewritten on import. - - This function will make sure that this module or all modules inside - the package will get their assert statements rewritten. - Thus you should make sure to call this before the module is - actually imported, usually in your __init__.py if you are a plugin - using a package. - - :raise TypeError: if the given module names are not strings. - """ - for name in names: - if not isinstance(name, str): - msg = "expected module names as *args, got {0} instead" - raise TypeError(msg.format(repr(names))) - for hook in sys.meta_path: - if isinstance(hook, rewrite.AssertionRewritingHook): - importhook = hook - break - else: - importhook = DummyRewriteHook() - importhook.mark_rewrite(*names) - - -class DummyRewriteHook(object): - """A no-op import hook for when rewriting is disabled.""" - - def mark_rewrite(self, *names): - pass - - -class AssertionState(object): - """State for the assertion plugin.""" - - def __init__(self, config, mode): - self.mode = mode - self.trace = config.trace.root.get("assertion") - self.hook = None - - -def install_importhook(config): - """Try to install the rewrite hook, raise SystemError if it fails.""" - # Jython has an AST bug that make the assertion rewriting hook malfunction. - if sys.platform.startswith("java"): - raise SystemError("rewrite not supported") - - config._assertstate = AssertionState(config, "rewrite") - config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) - sys.meta_path.insert(0, hook) - config._assertstate.trace("installed rewrite import hook") - - def undo(): - hook = config._assertstate.hook - if hook is not None and hook in sys.meta_path: - sys.meta_path.remove(hook) - - config.add_cleanup(undo) - return hook - - -def pytest_collection(session): - # this hook is only called when test modules are collected - # so for example not in the master process of pytest-xdist - # (which does not collect test modules) - assertstate = getattr(session.config, "_assertstate", None) - if assertstate: - if assertstate.hook is not None: - assertstate.hook.set_session(session) - - -def pytest_runtest_setup(item): - """Setup the pytest_assertrepr_compare hook - - The newinterpret and rewrite modules will use util._reprcompare if - it exists to use custom reporting via the - pytest_assertrepr_compare hook. This sets up this custom - comparison for the test. - """ - - def callbinrepr(op, left, right): - """Call the pytest_assertrepr_compare hook and prepare the result - - This uses the first result from the hook and then ensures the - following: - * Overly verbose explanations are truncated unless configured otherwise - (eg. if running in verbose mode). - * Embedded newlines are escaped to help util.format_explanation() - later. 
- * If the rewrite mode is used embedded %-characters are replaced - to protect later % formatting. - - The result can be formatted by util.format_explanation() for - pretty printing. - """ - hook_result = item.ihook.pytest_assertrepr_compare( - config=item.config, op=op, left=left, right=right - ) - for new_expl in hook_result: - if new_expl: - new_expl = truncate.truncate_if_required(new_expl, item) - new_expl = [line.replace("\n", "\\n") for line in new_expl] - res = six.text_type("\n~").join(new_expl) - if item.config.getvalue("assertmode") == "rewrite": - res = res.replace("%", "%%") - return res - - util._reprcompare = callbinrepr - - -def pytest_runtest_teardown(item): - util._reprcompare = None - - -def pytest_sessionfinish(session): - assertstate = getattr(session.config, "_assertstate", None) - if assertstate: - if assertstate.hook is not None: - assertstate.hook.set_session(None) - - -# Expose this plugin's implementation for the pytest_assertrepr_compare hook -pytest_assertrepr_compare = util.assertrepr_compare diff --git a/contrib/python/pytest/py2/_pytest/assertion/rewrite.py b/contrib/python/pytest/py2/_pytest/assertion/rewrite.py deleted file mode 100644 index 6cfd81a32f..0000000000 --- a/contrib/python/pytest/py2/_pytest/assertion/rewrite.py +++ /dev/null @@ -1,1072 +0,0 @@ -# -*- coding: utf-8 -*- -"""Rewrite assertion AST to produce nice error messages""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import warnings -warnings.filterwarnings("ignore", category=DeprecationWarning, module="_pytest.assertion.rewrite") - -import ast -import errno -import imp -import itertools -import marshal -import os -import re -import string -import struct -import sys -import types - -import atomicwrites -import py -import six - -from _pytest._io.saferepr import saferepr -from _pytest.assertion import util -from _pytest.assertion.util import ( # noqa: F401 - format_explanation as _format_explanation, -) -from _pytest.compat import spec_from_file_location -from _pytest.pathlib import fnmatch_ex -from _pytest.pathlib import PurePath - -# pytest caches rewritten pycs in __pycache__. -if hasattr(imp, "get_tag"): - PYTEST_TAG = imp.get_tag() + "-PYTEST" -else: - if hasattr(sys, "pypy_version_info"): - impl = "pypy" - elif sys.platform == "java": - impl = "jython" - else: - impl = "cpython" - ver = sys.version_info - PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) - del ver, impl - -PYC_EXT = ".py" + (__debug__ and "c" or "o") -PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT - -ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 - -if sys.version_info >= (3, 5): - ast_Call = ast.Call -else: - - def ast_Call(a, b, c): - return ast.Call(a, b, c, None, None) - - -class AssertionRewritingHook(object): - """PEP302 Import hook which rewrites asserts.""" - - def __init__(self, config): - self.config = config - try: - self.fnpats = config.getini("python_files") - except ValueError: - self.fnpats = ["test_*.py", "*_test.py"] - self.session = None - self.modules = {} - self._rewritten_names = set() - self._must_rewrite = set() - # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, - # which might result in infinite recursion (#3506) - self._writing_pyc = False - self._basenames_to_check_rewrite = {"conftest"} - self._marked_for_rewrite_cache = {} - self._session_paths_checked = False - - def set_session(self, session): - self.session = session - self._session_paths_checked = False - - def _imp_find_module(self, name, path=None): - """Indirection so we can mock calls to find_module originated from the hook during testing""" - return imp.find_module(name, path) - - def find_module(self, name, path=None): - if self._writing_pyc: - return None - state = self.config._assertstate - if self._early_rewrite_bailout(name, state): - return None - state.trace("find_module called for: %s" % name) - names = name.rsplit(".", 1) - lastname = names[-1] - pth = None - if path is not None: - # Starting with Python 3.3, path is a _NamespacePath(), which - # causes problems if not converted to list. - path = list(path) - if len(path) == 1: - pth = path[0] - if pth is None: - try: - fd, fn, desc = self._imp_find_module(lastname, path) - except ImportError: - return None - if fd is not None: - fd.close() - tp = desc[2] - if tp == imp.PY_COMPILED: - if hasattr(imp, "source_from_cache"): - try: - fn = imp.source_from_cache(fn) - except ValueError: - # Python 3 doesn't like orphaned but still-importable - # .pyc files. - fn = fn[:-1] - else: - fn = fn[:-1] - elif tp != imp.PY_SOURCE: - # Don't know what this is. - return None - else: - fn = os.path.join(pth, name.rpartition(".")[2] + ".py") - - fn_pypath = py.path.local(fn) - if not self._should_rewrite(name, fn_pypath, state): - return None - - self._rewritten_names.add(name) - - # The requested module looks like a test file, so rewrite it. This is - # the most magical part of the process: load the source, rewrite the - # asserts, and load the rewritten source. We also cache the rewritten - # module code in a special pyc. We must be aware of the possibility of - # concurrent pytest processes rewriting and loading pycs. To avoid - # tricky race conditions, we maintain the following invariant: The - # cached pyc is always a complete, valid pyc. Operations on it must be - # atomic. POSIX's atomic rename comes in handy. - write = not sys.dont_write_bytecode - cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") - if write: - try: - os.mkdir(cache_dir) - except OSError: - e = sys.exc_info()[1].errno - if e == errno.EEXIST: - # Either the __pycache__ directory already exists (the - # common case) or it's blocked by a non-dir node. In the - # latter case, we'll ignore it in _write_pyc. - pass - elif e in [errno.ENOENT, errno.ENOTDIR]: - # One of the path components was not a directory, likely - # because we're in a zip file. 
- write = False - elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: - state.trace("read only directory: %r" % fn_pypath.dirname) - write = False - else: - raise - cache_name = fn_pypath.basename[:-3] + PYC_TAIL - pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going - # to check for a cached pyc. This may not be optimal... - co = _read_pyc(fn_pypath, pyc, state.trace) - if co is None: - state.trace("rewriting %r" % (fn,)) - source_stat, co = _rewrite_test(self.config, fn_pypath) - if co is None: - # Probably a SyntaxError in the test. - return None - if write: - self._writing_pyc = True - try: - _write_pyc(state, co, source_stat, pyc) - finally: - self._writing_pyc = False - else: - state.trace("found cached rewritten pyc for %r" % (fn,)) - self.modules[name] = co, pyc - return self - - def _early_rewrite_bailout(self, name, state): - """ - This is a fast way to get out of rewriting modules. Profiling has - shown that the call to imp.find_module (inside of the find_module - from this class) is a major slowdown, so, this method tries to - filter what we're sure won't be rewritten before getting to it. - """ - if self.session is not None and not self._session_paths_checked: - self._session_paths_checked = True - for path in self.session._initialpaths: - # Make something as c:/projects/my_project/path.py -> - # ['c:', 'projects', 'my_project', 'path.py'] - parts = str(path).split(os.path.sep) - # add 'path' to basenames to be checked. - self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) - - # Note: conftest already by default in _basenames_to_check_rewrite. - parts = name.split(".") - if parts[-1] in self._basenames_to_check_rewrite: - return False - - # For matching the name it must be as if it was a filename. - path = PurePath(os.path.sep.join(parts) + ".py") - - for pat in self.fnpats: - # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based - # on the name alone because we need to match against the full path - if os.path.dirname(pat): - return False - if fnmatch_ex(pat, path): - return False - - if self._is_marked_for_rewrite(name, state): - return False - - state.trace("early skip of rewriting module: %s" % (name,)) - return True - - def _should_rewrite(self, name, fn_pypath, state): - # always rewrite conftest files - fn = str(fn_pypath) - if fn_pypath.basename == "conftest.py": - state.trace("rewriting conftest file: %r" % (fn,)) - return True - - if self.session is not None: - if self.session.isinitpath(fn): - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) - return True - - # modules not passed explicitly on the command line are only - # rewritten if they match the naming convention for test files - for pat in self.fnpats: - if fn_pypath.fnmatch(pat): - state.trace("matched test file %r" % (fn,)) - return True - - return self._is_marked_for_rewrite(name, state) - - def _is_marked_for_rewrite(self, name, state): - try: - return self._marked_for_rewrite_cache[name] - except KeyError: - for marked in self._must_rewrite: - if name == marked or name.startswith(marked + "."): - state.trace("matched marked file %r (from %r)" % (name, marked)) - self._marked_for_rewrite_cache[name] = True - return True - - self._marked_for_rewrite_cache[name] = False - return False - - def mark_rewrite(self, *names): - """Mark import names as needing to be rewritten. - - The named module or package as well as any nested modules will - be rewritten on import. 
- """ - already_imported = ( - set(names).intersection(sys.modules).difference(self._rewritten_names) - ) - for name in already_imported: - if not AssertionRewriter.is_rewrite_disabled( - sys.modules[name].__doc__ or "" - ): - self._warn_already_imported(name) - self._must_rewrite.update(names) - self._marked_for_rewrite_cache.clear() - - def _warn_already_imported(self, name): - from _pytest.warning_types import PytestAssertRewriteWarning - from _pytest.warnings import _issue_warning_captured - - _issue_warning_captured( - PytestAssertRewriteWarning( - "Module already imported so cannot be rewritten: %s" % name - ), - self.config.hook, - stacklevel=5, - ) - - def load_module(self, name): - co, pyc = self.modules.pop(name) - if name in sys.modules: - # If there is an existing module object named 'fullname' in - # sys.modules, the loader must use that existing module. (Otherwise, - # the reload() builtin will not work correctly.) - mod = sys.modules[name] - else: - # I wish I could just call imp.load_compiled here, but __file__ has to - # be set properly. In Python 3.2+, this all would be handled correctly - # by load_compiled. - mod = sys.modules[name] = imp.new_module(name) - try: - mod.__file__ = co.co_filename - # Normally, this attribute is 3.2+. - mod.__cached__ = pyc - mod.__loader__ = self - # Normally, this attribute is 3.4+ - mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) - exec(co, mod.__dict__) - except: # noqa - if name in sys.modules: - del sys.modules[name] - raise - return sys.modules[name] - - def is_package(self, name): - try: - fd, fn, desc = self._imp_find_module(name) - except ImportError: - return False - if fd is not None: - fd.close() - tp = desc[2] - return tp == imp.PKG_DIRECTORY - - def get_data(self, pathname): - """Optional PEP302 get_data API. - """ - with open(pathname, "rb") as f: - return f.read() - - -def _write_pyc(state, co, source_stat, pyc): - # Technically, we don't have to have the same pyc format as - # (C)Python, since these "pycs" should never be seen by builtin - # import. However, there's little reason deviate, and I hope - # sometime to be able to use imp.load_compiled to load them. (See - # the comment in load_module above.) - try: - with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: - fp.write(imp.get_magic()) - # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903) - mtime = int(source_stat.mtime) & 0xFFFFFFFF - size = source_stat.size & 0xFFFFFFFF - # "<LL" stands for 2 unsigned longs, little-ending - fp.write(struct.pack("<LL", mtime, size)) - fp.write(marshal.dumps(co)) - except EnvironmentError as e: - state.trace("error writing pyc file at %s: errno=%s" % (pyc, e.errno)) - # we ignore any failure to write the cache file - # there are many reasons, permission-denied, __pycache__ being a - # file etc. - return False - return True - - -RN = "\r\n".encode("utf-8") -N = "\n".encode("utf-8") - -cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+") -BOM_UTF8 = "\xef\xbb\xbf" - - -def _rewrite_test(config, fn): - """Try to read and rewrite *fn* and return the code object.""" - state = config._assertstate - try: - stat = fn.stat() - source = fn.read("rb") - except EnvironmentError: - return None, None - if ASCII_IS_DEFAULT_ENCODING: - # ASCII is the default encoding in Python 2. Without a coding - # declaration, Python 2 will complain about any bytes in the file - # outside the ASCII range. 
Sadly, this behavior does not extend to - # compile() or ast.parse(), which prefer to interpret the bytes as - # latin-1. (At least they properly handle explicit coding cookies.) To - # preserve this error behavior, we could force ast.parse() to use ASCII - # as the encoding by inserting a coding cookie. Unfortunately, that - # messes up line numbers. Thus, we have to check ourselves if anything - # is outside the ASCII range in the case no encoding is explicitly - # declared. For more context, see issue #269. Yay for Python 3 which - # gets this right. - end1 = source.find("\n") - end2 = source.find("\n", end1 + 1) - if ( - not source.startswith(BOM_UTF8) - and cookie_re.match(source[0:end1]) is None - and cookie_re.match(source[end1 + 1 : end2]) is None - ): - if hasattr(state, "_indecode"): - # encodings imported us again, so don't rewrite. - return None, None - state._indecode = True - try: - try: - source.decode("ascii") - except UnicodeDecodeError: - # Let it fail in real import. - return None, None - finally: - del state._indecode - try: - tree = ast.parse(source, filename=fn.strpath) - except SyntaxError: - # Let this pop up again in the real import. - state.trace("failed to parse: %r" % (fn,)) - return None, None - rewrite_asserts(tree, fn, config) - try: - co = compile(tree, fn.strpath, "exec", dont_inherit=True) - except SyntaxError: - # It's possible that this error is from some bug in the - # assertion rewriting, but I don't know of a fast way to tell. - state.trace("failed to compile: %r" % (fn,)) - return None, None - return stat, co - - -def _read_pyc(source, pyc, trace=lambda x: None): - """Possibly read a pytest pyc containing rewritten code. - - Return rewritten code if successful or None if not. - """ - try: - fp = open(pyc, "rb") - except IOError: - return None - with fp: - try: - mtime = int(source.mtime()) - size = source.size() - data = fp.read(12) - except EnvironmentError as e: - trace("_read_pyc(%s): EnvironmentError %s" % (source, e)) - return None - # Check for invalid or out of date pyc file. - if ( - len(data) != 12 - or data[:4] != imp.get_magic() - or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF) - ): - trace("_read_pyc(%s): invalid or out of date pyc" % source) - return None - try: - co = marshal.load(fp) - except Exception as e: - trace("_read_pyc(%s): marshal.load error %s" % (source, e)) - return None - if not isinstance(co, types.CodeType): - trace("_read_pyc(%s): not a code object" % source) - return None - return co - - -def rewrite_asserts(mod, module_path=None, config=None): - """Rewrite the assert statements in mod.""" - AssertionRewriter(module_path, config).run(mod) - - -def _saferepr(obj): - """Get a safe repr of an object for assertion error messages. - - The assertion formatting (util.format_explanation()) requires - newlines to be escaped since they are a special character for it. - Normally assertion.util.format_explanation() does this but for a - custom repr it is possible to contain one of the special escape - sequences, especially '\n{' and '\n}' are likely to be present in - JSON reprs. - - """ - r = saferepr(obj) - # only occurs in python2.x, repr must return text in python3+ - if isinstance(r, bytes): - # Represent unprintable bytes as `\x##` - r = u"".join( - u"\\x{:x}".format(ord(c)) if c not in string.printable else c.decode() - for c in r - ) - return r.replace(u"\n", u"\\n") - - -def _format_assertmsg(obj): - """Format the custom assertion message given. 
- - For strings this simply replaces newlines with '\n~' so that - util.format_explanation() will preserve them instead of escaping - newlines. For other objects saferepr() is used first. - - """ - # reprlib appears to have a bug which means that if a string - # contains a newline it gets escaped, however if an object has a - # .__repr__() which contains newlines it does not get escaped. - # However in either case we want to preserve the newline. - replaces = [(u"\n", u"\n~"), (u"%", u"%%")] - if not isinstance(obj, six.string_types): - obj = saferepr(obj) - replaces.append((u"\\n", u"\n~")) - - if isinstance(obj, bytes): - replaces = [(r1.encode(), r2.encode()) for r1, r2 in replaces] - - for r1, r2 in replaces: - obj = obj.replace(r1, r2) - - return obj - - -def _should_repr_global_name(obj): - if callable(obj): - return False - - try: - return not hasattr(obj, "__name__") - except Exception: - return True - - -def _format_boolop(explanations, is_or): - explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" - if isinstance(explanation, six.text_type): - return explanation.replace(u"%", u"%%") - else: - return explanation.replace(b"%", b"%%") - - -def _call_reprcompare(ops, results, expls, each_obj): - for i, res, expl in zip(range(len(ops)), results, expls): - try: - done = not res - except Exception: - done = True - if done: - break - if util._reprcompare is not None: - custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) - if custom is not None: - return custom - return expl - - -unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} - -binop_map = { - ast.BitOr: "|", - ast.BitXor: "^", - ast.BitAnd: "&", - ast.LShift: "<<", - ast.RShift: ">>", - ast.Add: "+", - ast.Sub: "-", - ast.Mult: "*", - ast.Div: "/", - ast.FloorDiv: "//", - ast.Mod: "%%", # escaped for string formatting - ast.Eq: "==", - ast.NotEq: "!=", - ast.Lt: "<", - ast.LtE: "<=", - ast.Gt: ">", - ast.GtE: ">=", - ast.Pow: "**", - ast.Is: "is", - ast.IsNot: "is not", - ast.In: "in", - ast.NotIn: "not in", -} -# Python 3.5+ compatibility -try: - binop_map[ast.MatMult] = "@" -except AttributeError: - pass - -# Python 3.4+ compatibility -if hasattr(ast, "NameConstant"): - _NameConstant = ast.NameConstant -else: - - def _NameConstant(c): - return ast.Name(str(c), ast.Load()) - - -def set_location(node, lineno, col_offset): - """Set node location information recursively.""" - - def _fix(node, lineno, col_offset): - if "lineno" in node._attributes: - node.lineno = lineno - if "col_offset" in node._attributes: - node.col_offset = col_offset - for child in ast.iter_child_nodes(node): - _fix(child, lineno, col_offset) - - _fix(node, lineno, col_offset) - return node - - -class AssertionRewriter(ast.NodeVisitor): - """Assertion rewriting implementation. - - The main entrypoint is to call .run() with an ast.Module instance, - this will then find all the assert statements and rewrite them to - provide intermediate values and a detailed assertion error. See - http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html - for an overview of how this works. - - The entry point here is .run() which will iterate over all the - statements in an ast.Module and for each ast.Assert statement it - finds call .visit() with it. 
Then .visit_Assert() takes over and - is responsible for creating new ast statements to replace the - original assert statement: it rewrites the test of an assertion - to provide intermediate values and replace it with an if statement - which raises an assertion error with a detailed explanation in - case the expression is false. - - For this .visit_Assert() uses the visitor pattern to visit all the - AST nodes of the ast.Assert.test field, each visit call returning - an AST node and the corresponding explanation string. During this - state is kept in several instance attributes: - - :statements: All the AST statements which will replace the assert - statement. - - :variables: This is populated by .variable() with each variable - used by the statements so that they can all be set to None at - the end of the statements. - - :variable_counter: Counter to create new unique variables needed - by statements. Variables are created using .variable() and - have the form of "@py_assert0". - - :on_failure: The AST statements which will be executed if the - assertion test fails. This is the code which will construct - the failure message and raises the AssertionError. - - :explanation_specifiers: A dict filled by .explanation_param() - with %-formatting placeholders and their corresponding - expressions to use in the building of an assertion message. - This is used by .pop_format_context() to build a message. - - :stack: A stack of the explanation_specifiers dicts maintained by - .push_format_context() and .pop_format_context() which allows - to build another %-formatted string while already building one. - - This state is reset on every new assert statement visited and used - by the other visitors. - - """ - - def __init__(self, module_path, config): - super(AssertionRewriter, self).__init__() - self.module_path = module_path - self.config = config - - def run(self, mod): - """Find all assert statements in *mod* and rewrite them.""" - if not mod.body: - # Nothing to do. - return - # Insert some special imports at the top of the module but after any - # docstrings and __future__ imports. - aliases = [ - ast.alias(six.moves.builtins.__name__, "@py_builtins"), - ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), - ] - doc = getattr(mod, "docstring", None) - expect_docstring = doc is None - if doc is not None and self.is_rewrite_disabled(doc): - return - pos = 0 - lineno = 1 - for item in mod.body: - if ( - expect_docstring - and isinstance(item, ast.Expr) - and isinstance(item.value, ast.Str) - ): - doc = item.value.s - if self.is_rewrite_disabled(doc): - return - expect_docstring = False - elif ( - not isinstance(item, ast.ImportFrom) - or item.level > 0 - or item.module != "__future__" - ): - lineno = item.lineno - break - pos += 1 - else: - lineno = item.lineno - imports = [ - ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases - ] - mod.body[pos:pos] = imports - # Collect asserts. - nodes = [mod] - while nodes: - node = nodes.pop() - for name, field in ast.iter_fields(node): - if isinstance(field, list): - new = [] - for i, child in enumerate(field): - if isinstance(child, ast.Assert): - # Transform assert. - new.extend(self.visit(child)) - else: - new.append(child) - if isinstance(child, ast.AST): - nodes.append(child) - setattr(node, name, new) - elif ( - isinstance(field, ast.AST) - # Don't recurse into expressions as they can't contain - # asserts. 
- and not isinstance(field, ast.expr) - ): - nodes.append(field) - - @staticmethod - def is_rewrite_disabled(docstring): - return "PYTEST_DONT_REWRITE" in docstring - - def variable(self): - """Get a new variable.""" - # Use a character invalid in python identifiers to avoid clashing. - name = "@py_assert" + str(next(self.variable_counter)) - self.variables.append(name) - return name - - def assign(self, expr): - """Give *expr* a name.""" - name = self.variable() - self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) - return ast.Name(name, ast.Load()) - - def display(self, expr): - """Call saferepr on the expression.""" - return self.helper("_saferepr", expr) - - def helper(self, name, *args): - """Call a helper in this module.""" - py_name = ast.Name("@pytest_ar", ast.Load()) - attr = ast.Attribute(py_name, name, ast.Load()) - return ast_Call(attr, list(args), []) - - def builtin(self, name): - """Return the builtin called *name*.""" - builtin_name = ast.Name("@py_builtins", ast.Load()) - return ast.Attribute(builtin_name, name, ast.Load()) - - def explanation_param(self, expr): - """Return a new named %-formatting placeholder for expr. - - This creates a %-formatting placeholder for expr in the - current formatting context, e.g. ``%(py0)s``. The placeholder - and expr are placed in the current format context so that it - can be used on the next call to .pop_format_context(). - - """ - specifier = "py" + str(next(self.variable_counter)) - self.explanation_specifiers[specifier] = expr - return "%(" + specifier + ")s" - - def push_format_context(self): - """Create a new formatting context. - - The format context is used for when an explanation wants to - have a variable value formatted in the assertion message. In - this case the value required can be added using - .explanation_param(). Finally .pop_format_context() is used - to format a string of %-formatted values as added by - .explanation_param(). - - """ - self.explanation_specifiers = {} - self.stack.append(self.explanation_specifiers) - - def pop_format_context(self, expl_expr): - """Format the %-formatted string with current format context. - - The expl_expr should be an ast.Str instance constructed from - the %-placeholders created by .explanation_param(). This will - add the required code to format said string to .on_failure and - return the ast.Name instance of the formatted string. - - """ - current = self.stack.pop() - if self.stack: - self.explanation_specifiers = self.stack[-1] - keys = [ast.Str(key) for key in current.keys()] - format_dict = ast.Dict(keys, list(current.values())) - form = ast.BinOp(expl_expr, ast.Mod(), format_dict) - name = "@py_format" + str(next(self.variable_counter)) - self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) - return ast.Name(name, ast.Load()) - - def generic_visit(self, node): - """Handle expressions we don't have custom code for.""" - assert isinstance(node, ast.expr) - res = self.assign(node) - return res, self.explanation_param(self.display(res)) - - def visit_Assert(self, assert_): - """Return the AST statements to replace the ast.Assert instance. - - This rewrites the test of an assertion to provide - intermediate values and replace it with an if statement which - raises an assertion error with a detailed explanation in case - the expression is false. 
- - """ - if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: - from _pytest.warning_types import PytestAssertRewriteWarning - import warnings - - warnings.warn_explicit( - PytestAssertRewriteWarning( - "assertion is always true, perhaps remove parentheses?" - ), - category=None, - filename=str(self.module_path), - lineno=assert_.lineno, - ) - - self.statements = [] - self.variables = [] - self.variable_counter = itertools.count() - self.stack = [] - self.on_failure = [] - self.push_format_context() - # Rewrite assert into a bunch of statements. - top_condition, explanation = self.visit(assert_.test) - # If in a test module, check if directly asserting None, in order to warn [Issue #3191] - if self.module_path is not None: - self.statements.append( - self.warn_about_none_ast( - top_condition, module_path=self.module_path, lineno=assert_.lineno - ) - ) - # Create failure message. - body = self.on_failure - negation = ast.UnaryOp(ast.Not(), top_condition) - self.statements.append(ast.If(negation, body, [])) - if assert_.msg: - assertmsg = self.helper("_format_assertmsg", assert_.msg) - explanation = "\n>assert " + explanation - else: - assertmsg = ast.Str("") - explanation = "assert " + explanation - template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) - msg = self.pop_format_context(template) - fmt = self.helper("_format_explanation", msg) - err_name = ast.Name("AssertionError", ast.Load()) - exc = ast_Call(err_name, [fmt], []) - if sys.version_info[0] >= 3: - raise_ = ast.Raise(exc, None) - else: - raise_ = ast.Raise(exc, None, None) - body.append(raise_) - # Clear temporary variables by setting them to None. - if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] - clear = ast.Assign(variables, _NameConstant(None)) - self.statements.append(clear) - # Fix line numbers. - for stmt in self.statements: - set_location(stmt, assert_.lineno, assert_.col_offset) - return self.statements - - def warn_about_none_ast(self, node, module_path, lineno): - """ - Returns an AST issuing a warning if the value of node is `None`. - This is used to warn the user when asserting a function that asserts - internally already. - See issue #3191 for more details. - """ - - # Using parse because it is different between py2 and py3. - AST_NONE = ast.parse("None").body[0].value - val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) - send_warning = ast.parse( - """ -from _pytest.warning_types import PytestAssertRewriteWarning -from warnings import warn_explicit -warn_explicit( - PytestAssertRewriteWarning('asserting the value None, please use "assert is None"'), - category=None, - filename={filename!r}, - lineno={lineno}, -) - """.format( - filename=module_path.strpath, lineno=lineno - ) - ).body - return ast.If(val_is_none, send_warning, []) - - def visit_Name(self, name): - # Display the repr of the name if it's a local variable or - # _should_repr_global_name() thinks it's acceptable. 
- locs = ast_Call(self.builtin("locals"), [], []) - inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) - dorepr = self.helper("_should_repr_global_name", name) - test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) - expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) - return name, self.explanation_param(expr) - - def visit_BoolOp(self, boolop): - res_var = self.variable() - expl_list = self.assign(ast.List([], ast.Load())) - app = ast.Attribute(expl_list, "append", ast.Load()) - is_or = int(isinstance(boolop.op, ast.Or)) - body = save = self.statements - fail_save = self.on_failure - levels = len(boolop.values) - 1 - self.push_format_context() - # Process each operand, short-circuting if needed. - for i, v in enumerate(boolop.values): - if i: - fail_inner = [] - # cond is set in a prior loop iteration below - self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa - self.on_failure = fail_inner - self.push_format_context() - res, expl = self.visit(v) - body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) - expl_format = self.pop_format_context(ast.Str(expl)) - call = ast_Call(app, [expl_format], []) - self.on_failure.append(ast.Expr(call)) - if i < levels: - cond = res - if is_or: - cond = ast.UnaryOp(ast.Not(), cond) - inner = [] - self.statements.append(ast.If(cond, inner, [])) - self.statements = body = inner - self.statements = save - self.on_failure = fail_save - expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or)) - expl = self.pop_format_context(expl_template) - return ast.Name(res_var, ast.Load()), self.explanation_param(expl) - - def visit_UnaryOp(self, unary): - pattern = unary_map[unary.op.__class__] - operand_res, operand_expl = self.visit(unary.operand) - res = self.assign(ast.UnaryOp(unary.op, operand_res)) - return res, pattern % (operand_expl,) - - def visit_BinOp(self, binop): - symbol = binop_map[binop.op.__class__] - left_expr, left_expl = self.visit(binop.left) - right_expr, right_expl = self.visit(binop.right) - explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) - res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) - return res, explanation - - def visit_Call_35(self, call): - """ - visit `ast.Call` nodes on Python3.5 and after - """ - new_func, func_expl = self.visit(call.func) - arg_expls = [] - new_args = [] - new_kwargs = [] - for arg in call.args: - res, expl = self.visit(arg) - arg_expls.append(expl) - new_args.append(res) - for keyword in call.keywords: - res, expl = self.visit(keyword.value) - new_kwargs.append(ast.keyword(keyword.arg, res)) - if keyword.arg: - arg_expls.append(keyword.arg + "=" + expl) - else: # **args have `arg` keywords with an .arg of None - arg_expls.append("**" + expl) - - expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs) - res = self.assign(new_call) - res_expl = self.explanation_param(self.display(res)) - outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) - return res, outer_expl - - def visit_Starred(self, starred): - # From Python 3.5, a Starred node can appear in a function call - res, expl = self.visit(starred.value) - new_starred = ast.Starred(res, starred.ctx) - return new_starred, "*" + expl - - def visit_Call_legacy(self, call): - """ - visit `ast.Call nodes on 3.4 and below` - """ - new_func, func_expl = self.visit(call.func) - arg_expls = [] - new_args = [] - new_kwargs = [] - new_star = new_kwarg = None - for arg in call.args: - res, expl = self.visit(arg) - new_args.append(res) - 
arg_expls.append(expl) - for keyword in call.keywords: - res, expl = self.visit(keyword.value) - new_kwargs.append(ast.keyword(keyword.arg, res)) - arg_expls.append(keyword.arg + "=" + expl) - if call.starargs: - new_star, expl = self.visit(call.starargs) - arg_expls.append("*" + expl) - if call.kwargs: - new_kwarg, expl = self.visit(call.kwargs) - arg_expls.append("**" + expl) - expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) - res = self.assign(new_call) - res_expl = self.explanation_param(self.display(res)) - outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) - return res, outer_expl - - # ast.Call signature changed on 3.5, - # conditionally change which methods is named - # visit_Call depending on Python version - if sys.version_info >= (3, 5): - visit_Call = visit_Call_35 - else: - visit_Call = visit_Call_legacy - - def visit_Attribute(self, attr): - if not isinstance(attr.ctx, ast.Load): - return self.generic_visit(attr) - value, value_expl = self.visit(attr.value) - res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) - res_expl = self.explanation_param(self.display(res)) - pat = "%s\n{%s = %s.%s\n}" - expl = pat % (res_expl, res_expl, value_expl, attr.attr) - return res, expl - - def visit_Compare(self, comp): - self.push_format_context() - left_res, left_expl = self.visit(comp.left) - if isinstance(comp.left, (ast.Compare, ast.BoolOp)): - left_expl = "({})".format(left_expl) - res_variables = [self.variable() for i in range(len(comp.ops))] - load_names = [ast.Name(v, ast.Load()) for v in res_variables] - store_names = [ast.Name(v, ast.Store()) for v in res_variables] - it = zip(range(len(comp.ops)), comp.ops, comp.comparators) - expls = [] - syms = [] - results = [left_res] - for i, op, next_operand in it: - next_res, next_expl = self.visit(next_operand) - if isinstance(next_operand, (ast.Compare, ast.BoolOp)): - next_expl = "({})".format(next_expl) - results.append(next_res) - sym = binop_map[op.__class__] - syms.append(ast.Str(sym)) - expl = "%s %s %s" % (left_expl, sym, next_expl) - expls.append(ast.Str(expl)) - res_expr = ast.Compare(left_res, [op], [next_res]) - self.statements.append(ast.Assign([store_names[i]], res_expr)) - left_res, left_expl = next_res, next_expl - # Use pytest.assertion.util._reprcompare if that's available. - expl_call = self.helper( - "_call_reprcompare", - ast.Tuple(syms, ast.Load()), - ast.Tuple(load_names, ast.Load()), - ast.Tuple(expls, ast.Load()), - ast.Tuple(results, ast.Load()), - ) - if len(comp.ops) > 1: - res = ast.BoolOp(ast.And(), load_names) - else: - res = load_names[0] - return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/contrib/python/pytest/py2/_pytest/assertion/truncate.py b/contrib/python/pytest/py2/_pytest/assertion/truncate.py deleted file mode 100644 index 525896ea9a..0000000000 --- a/contrib/python/pytest/py2/_pytest/assertion/truncate.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Utilities for truncating assertion output. - -Current default behaviour is to truncate assertion explanations at -~8 terminal lines, unless running in "-vv" mode or running on CI. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import six - -DEFAULT_MAX_LINES = 8 -DEFAULT_MAX_CHARS = 8 * 80 -USAGE_MSG = "use '-vv' to show" - - -def truncate_if_required(explanation, item, max_length=None): - """ - Truncate this assertion explanation if the given test item is eligible. - """ - if _should_truncate_item(item): - return _truncate_explanation(explanation) - return explanation - - -def _should_truncate_item(item): - """ - Whether or not this test item is eligible for truncation. - """ - verbose = item.config.option.verbose - return verbose < 2 and not _running_on_ci() - - -def _running_on_ci(): - """Check if we're currently running on a CI system.""" - env_vars = ["CI", "BUILD_NUMBER"] - return any(var in os.environ for var in env_vars) - - -def _truncate_explanation(input_lines, max_lines=None, max_chars=None): - """ - Truncate given list of strings that makes up the assertion explanation. - - Truncates to either 8 lines, or 640 characters - whichever the input reaches - first. The remaining lines will be replaced by a usage message. - """ - - if max_lines is None: - max_lines = DEFAULT_MAX_LINES - if max_chars is None: - max_chars = DEFAULT_MAX_CHARS - - # Check if truncation required - input_char_count = len("".join(input_lines)) - if len(input_lines) <= max_lines and input_char_count <= max_chars: - return input_lines - - # Truncate first to max_lines, and then truncate to max_chars if max_chars - # is exceeded. - truncated_explanation = input_lines[:max_lines] - truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) - - # Add ellipsis to final line - truncated_explanation[-1] = truncated_explanation[-1] + "..." - - # Append useful message to explanation - truncated_line_count = len(input_lines) - len(truncated_explanation) - truncated_line_count += 1 # Account for the part-truncated final line - msg = "...Full output truncated" - if truncated_line_count == 1: - msg += " ({} line hidden)".format(truncated_line_count) - else: - msg += " ({} lines hidden)".format(truncated_line_count) - msg += ", {}".format(USAGE_MSG) - truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) - return truncated_explanation - - -def _truncate_by_char_count(input_lines, max_chars): - # Check if truncation required - if len("".join(input_lines)) <= max_chars: - return input_lines - - # Find point at which input length exceeds total allowed length - iterated_char_count = 0 - for iterated_index, input_line in enumerate(input_lines): - if iterated_char_count + len(input_line) > max_chars: - break - iterated_char_count += len(input_line) - - # Create truncated explanation with modified final line - truncated_result = input_lines[:iterated_index] - final_line = input_lines[iterated_index] - if final_line: - final_line_truncate_point = max_chars - iterated_char_count - final_line = final_line[:final_line_truncate_point] - truncated_result.append(final_line) - return truncated_result diff --git a/contrib/python/pytest/py2/_pytest/assertion/util.py b/contrib/python/pytest/py2/_pytest/assertion/util.py deleted file mode 100644 index c382f1c609..0000000000 --- a/contrib/python/pytest/py2/_pytest/assertion/util.py +++ /dev/null @@ -1,421 +0,0 @@ -# -*- coding: utf-8 -*- -"""Utilities for assertion debugging""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pprint - -import six - -import _pytest._code 
-from ..compat import Sequence -from _pytest import outcomes -from _pytest._io.saferepr import saferepr -from _pytest.compat import ATTRS_EQ_FIELD - -# The _reprcompare attribute on the util module is used by the new assertion -# interpretation code and assertion rewriter to detect this plugin was -# loaded and in turn call the hooks defined here as part of the -# DebugInterpreter. -_reprcompare = None - - -# the re-encoding is needed for python2 repr -# with non-ascii characters (see issue 877 and 1379) -def ecu(s): - if isinstance(s, bytes): - return s.decode("UTF-8", "replace") - else: - return s - - -def format_explanation(explanation): - """This formats an explanation - - Normally all embedded newlines are escaped, however there are - three exceptions: \n{, \n} and \n~. The first two are intended - cover nested explanations, see function and attribute explanations - for examples (.visit_Call(), visit_Attribute()). The last one is - for when one explanation needs to span multiple lines, e.g. when - displaying diffs. - """ - explanation = ecu(explanation) - lines = _split_explanation(explanation) - result = _format_lines(lines) - return u"\n".join(result) - - -def _split_explanation(explanation): - """Return a list of individual lines in the explanation - - This will return a list of lines split on '\n{', '\n}' and '\n~'. - Any other newlines will be escaped and appear in the line as the - literal '\n' characters. - """ - raw_lines = (explanation or u"").split("\n") - lines = [raw_lines[0]] - for values in raw_lines[1:]: - if values and values[0] in ["{", "}", "~", ">"]: - lines.append(values) - else: - lines[-1] += "\\n" + values - return lines - - -def _format_lines(lines): - """Format the individual lines - - This will replace the '{', '}' and '~' characters of our mini - formatting language with the proper 'where ...', 'and ...' and ' + - ...' text, taking care of indentation along the way. - - Return a list of formatted lines. 
- """ - result = lines[:1] - stack = [0] - stackcnt = [0] - for line in lines[1:]: - if line.startswith("{"): - if stackcnt[-1]: - s = u"and " - else: - s = u"where " - stack.append(len(result)) - stackcnt[-1] += 1 - stackcnt.append(0) - result.append(u" +" + u" " * (len(stack) - 1) + s + line[1:]) - elif line.startswith("}"): - stack.pop() - stackcnt.pop() - result[stack[-1]] += line[1:] - else: - assert line[0] in ["~", ">"] - stack[-1] += 1 - indent = len(stack) if line.startswith("~") else len(stack) - 1 - result.append(u" " * indent + line[1:]) - assert len(stack) == 1 - return result - - -# Provide basestring in python3 -try: - basestring = basestring -except NameError: - basestring = str - - -def issequence(x): - return isinstance(x, Sequence) and not isinstance(x, basestring) - - -def istext(x): - return isinstance(x, basestring) - - -def isdict(x): - return isinstance(x, dict) - - -def isset(x): - return isinstance(x, (set, frozenset)) - - -def isdatacls(obj): - return getattr(obj, "__dataclass_fields__", None) is not None - - -def isattrs(obj): - return getattr(obj, "__attrs_attrs__", None) is not None - - -def isiterable(obj): - try: - iter(obj) - return not istext(obj) - except TypeError: - return False - - -def assertrepr_compare(config, op, left, right): - """Return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = saferepr(left, maxsize=int(width // 2)) - right_repr = saferepr(right, maxsize=width - len(left_repr)) - - summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr)) - - verbose = config.getoption("verbose") - explanation = None - try: - if op == "==": - if istext(left) and istext(right): - explanation = _diff_text(left, right, verbose) - else: - if issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right, verbose) - elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right, verbose) - elif isdict(left) and isdict(right): - explanation = _compare_eq_dict(left, right, verbose) - elif type(left) == type(right) and (isdatacls(left) or isattrs(left)): - type_fn = (isdatacls, isattrs) - explanation = _compare_eq_cls(left, right, verbose, type_fn) - elif verbose > 0: - explanation = _compare_eq_verbose(left, right) - if isiterable(left) and isiterable(right): - expl = _compare_eq_iterable(left, right, verbose) - if explanation is not None: - explanation.extend(expl) - else: - explanation = expl - elif op == "not in": - if istext(left) and istext(right): - explanation = _notin_text(left, right, verbose) - except outcomes.Exit: - raise - except Exception: - explanation = [ - u"(pytest_assertion plugin: representation of details failed. " - u"Probably an object has a faulty __repr__.)", - six.text_type(_pytest._code.ExceptionInfo.from_current()), - ] - - if not explanation: - return None - - return [summary] + explanation - - -def _diff_text(left, right, verbose=0): - """Return the explanation for the diff between text or bytes. - - Unless --verbose is used this will skip leading and trailing - characters which are identical to keep the diff minimal. - - If the input are bytes they will be safely converted to text. - """ - from difflib import ndiff - - explanation = [] - - def escape_for_readable_diff(binary_text): - """ - Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode. 
- This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape - newlines and carriage returns (#429). - """ - r = six.text_type(repr(binary_text)[1:-1]) - r = r.replace(r"\n", "\n") - r = r.replace(r"\r", "\r") - return r - - if isinstance(left, bytes): - left = escape_for_readable_diff(left) - if isinstance(right, bytes): - right = escape_for_readable_diff(right) - if verbose < 1: - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = [ - u"Skipping %s identical leading characters in diff, use -v to show" % i - ] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: - break - if i > 42: - i -= 10 # Provide some context - explanation += [ - u"Skipping {} identical trailing " - u"characters in diff, use -v to show".format(i) - ] - left = left[:-i] - right = right[:-i] - keepends = True - if left.isspace() or right.isspace(): - left = repr(str(left)) - right = repr(str(right)) - explanation += [u"Strings contain only whitespace, escaping them using repr()"] - explanation += [ - line.strip("\n") - for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) - ] - return explanation - - -def _compare_eq_verbose(left, right): - keepends = True - left_lines = repr(left).splitlines(keepends) - right_lines = repr(right).splitlines(keepends) - - explanation = [] - explanation += [u"-" + line for line in left_lines] - explanation += [u"+" + line for line in right_lines] - - return explanation - - -def _compare_eq_iterable(left, right, verbose=0): - if not verbose: - return [u"Use -v to get the full diff"] - # dynamic import to speedup pytest - import difflib - - try: - left_formatting = pprint.pformat(left).splitlines() - right_formatting = pprint.pformat(right).splitlines() - explanation = [u"Full diff:"] - except Exception: - # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling - # sorted() on a list would raise. See issue #718. - # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
- left_formatting = sorted(repr(x) for x in left) - right_formatting = sorted(repr(x) for x in right) - explanation = [u"Full diff (fallback to calling repr on each item):"] - explanation.extend( - line.strip() for line in difflib.ndiff(left_formatting, right_formatting) - ) - return explanation - - -def _compare_eq_sequence(left, right, verbose=0): - explanation = [] - len_left = len(left) - len_right = len(right) - for i in range(min(len_left, len_right)): - if left[i] != right[i]: - explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])] - break - len_diff = len_left - len_right - - if len_diff: - if len_diff > 0: - dir_with_more = "Left" - extra = saferepr(left[len_right]) - else: - len_diff = 0 - len_diff - dir_with_more = "Right" - extra = saferepr(right[len_left]) - - if len_diff == 1: - explanation += [u"%s contains one more item: %s" % (dir_with_more, extra)] - else: - explanation += [ - u"%s contains %d more items, first extra item: %s" - % (dir_with_more, len_diff, extra) - ] - return explanation - - -def _compare_eq_set(left, right, verbose=0): - explanation = [] - diff_left = left - right - diff_right = right - left - if diff_left: - explanation.append(u"Extra items in the left set:") - for item in diff_left: - explanation.append(saferepr(item)) - if diff_right: - explanation.append(u"Extra items in the right set:") - for item in diff_right: - explanation.append(saferepr(item)) - return explanation - - -def _compare_eq_dict(left, right, verbose=0): - explanation = [] - set_left = set(left) - set_right = set(right) - common = set_left.intersection(set_right) - same = {k: left[k] for k in common if left[k] == right[k]} - if same and verbose < 2: - explanation += [u"Omitting %s identical items, use -vv to show" % len(same)] - elif same: - explanation += [u"Common items:"] - explanation += pprint.pformat(same).splitlines() - diff = {k for k in common if left[k] != right[k]} - if diff: - explanation += [u"Differing items:"] - for k in diff: - explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] - extra_left = set_left - set_right - len_extra_left = len(extra_left) - if len_extra_left: - explanation.append( - u"Left contains %d more item%s:" - % (len_extra_left, "" if len_extra_left == 1 else "s") - ) - explanation.extend( - pprint.pformat({k: left[k] for k in extra_left}).splitlines() - ) - extra_right = set_right - set_left - len_extra_right = len(extra_right) - if len_extra_right: - explanation.append( - u"Right contains %d more item%s:" - % (len_extra_right, "" if len_extra_right == 1 else "s") - ) - explanation.extend( - pprint.pformat({k: right[k] for k in extra_right}).splitlines() - ) - return explanation - - -def _compare_eq_cls(left, right, verbose, type_fns): - isdatacls, isattrs = type_fns - if isdatacls(left): - all_fields = left.__dataclass_fields__ - fields_to_check = [field for field, info in all_fields.items() if info.compare] - elif isattrs(left): - all_fields = left.__attrs_attrs__ - fields_to_check = [ - field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD) - ] - - same = [] - diff = [] - for field in fields_to_check: - if getattr(left, field) == getattr(right, field): - same.append(field) - else: - diff.append(field) - - explanation = [] - if same and verbose < 2: - explanation.append(u"Omitting %s identical items, use -vv to show" % len(same)) - elif same: - explanation += [u"Matching attributes:"] - explanation += pprint.pformat(same).splitlines() - if diff: - explanation += [u"Differing attributes:"] - for 
field in diff: - explanation += [ - (u"%s: %r != %r") % (field, getattr(left, field), getattr(right, field)) - ] - return explanation - - -def _notin_text(term, text, verbose=0): - index = text.find(term) - head = text[:index] - tail = text[index + len(term) :] - correct_text = head + tail - diff = _diff_text(correct_text, text, verbose) - newdiff = [u"%s is contained here:" % saferepr(term, maxsize=42)] - for line in diff: - if line.startswith(u"Skipping"): - continue - if line.startswith(u"- "): - continue - if line.startswith(u"+ "): - newdiff.append(u" " + line[2:]) - else: - newdiff.append(line) - return newdiff diff --git a/contrib/python/pytest/py2/_pytest/cacheprovider.py b/contrib/python/pytest/py2/_pytest/cacheprovider.py deleted file mode 100644 index f5c5545484..0000000000 --- a/contrib/python/pytest/py2/_pytest/cacheprovider.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -""" -merged implementation of the cache provider - -the name cache was not chosen to ensure pluggy automatically -ignores the external pytest-cache -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import os -from collections import OrderedDict - -import attr -import py -import six - -import pytest -from .compat import _PY2 as PY2 -from .pathlib import Path -from .pathlib import resolve_from_str -from .pathlib import rm_rf - -README_CONTENT = u"""\ -# pytest cache directory # - -This directory contains data from the pytest's cache plugin, -which provides the `--lf` and `--ff` options, as well as the `cache` fixture. - -**Do not** commit this to version control. - -See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. -""" - -CACHEDIR_TAG_CONTENT = b"""\ -Signature: 8a477f597d28d172789f06886806bc55 -# This file is a cache directory tag created by pytest. -# For information about cache directory tags, see: -# http://www.bford.info/cachedir/spec.html -""" - - -@attr.s -class Cache(object): - _cachedir = attr.ib(repr=False) - _config = attr.ib(repr=False) - - @classmethod - def for_config(cls, config): - cachedir = cls.cache_dir_from_config(config) - if config.getoption("cacheclear") and cachedir.exists(): - rm_rf(cachedir) - cachedir.mkdir() - return cls(cachedir, config) - - @staticmethod - def cache_dir_from_config(config): - return resolve_from_str(config.getini("cache_dir"), config.rootdir) - - def warn(self, fmt, **args): - from _pytest.warnings import _issue_warning_captured - from _pytest.warning_types import PytestCacheWarning - - _issue_warning_captured( - PytestCacheWarning(fmt.format(**args) if args else fmt), - self._config.hook, - stacklevel=3, - ) - - def makedir(self, name): - """ return a directory path object with the given name. If the - directory does not yet exist, it will be created. You can use it - to manage files likes e. g. store/retrieve database - dumps across test sessions. - - :param name: must be a string not containing a ``/`` separator. - Make sure the name contains your plugin or application - identifiers to prevent clashes with other cache users. - """ - name = Path(name) - if len(name.parts) > 1: - raise ValueError("name is not allowed to contain path separators") - res = self._cachedir.joinpath("d", name) - res.mkdir(exist_ok=True, parents=True) - return py.path.local(res) - - def _getvaluepath(self, key): - return self._cachedir.joinpath("v", Path(key)) - - def get(self, key, default): - """ return cached value for the given key. 
If no value - was yet cached or the value cannot be read, the specified - default is returned. - - :param key: must be a ``/`` separated value. Usually the first - name is the name of your plugin or your application. - :param default: must be provided in case of a cache-miss or - invalid cache values. - - """ - path = self._getvaluepath(key) - try: - with path.open("r") as f: - return json.load(f) - except (ValueError, IOError, OSError): - return default - - def set(self, key, value): - """ save value for the given key. - - :param key: must be a ``/`` separated value. Usually the first - name is the name of your plugin or your application. - :param value: must be of any combination of basic - python types, including nested types - like e. g. lists of dictionaries. - """ - path = self._getvaluepath(key) - try: - if path.parent.is_dir(): - cache_dir_exists_already = True - else: - cache_dir_exists_already = self._cachedir.exists() - path.parent.mkdir(exist_ok=True, parents=True) - except (IOError, OSError): - self.warn("could not create cache path {path}", path=path) - return - if not cache_dir_exists_already: - self._ensure_supporting_files() - try: - f = path.open("wb" if PY2 else "w") - except (IOError, OSError): - self.warn("cache could not write path {path}", path=path) - else: - with f: - json.dump(value, f, indent=2, sort_keys=True) - - def _ensure_supporting_files(self): - """Create supporting files in the cache dir that are not really part of the cache.""" - readme_path = self._cachedir / "README.md" - readme_path.write_text(README_CONTENT) - - gitignore_path = self._cachedir.joinpath(".gitignore") - msg = u"# Created by pytest automatically.\n*" - gitignore_path.write_text(msg, encoding="UTF-8") - - cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") - cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) - - -class LFPlugin(object): - """ Plugin which implements the --lf (run last-failing) option """ - - def __init__(self, config): - self.config = config - active_keys = "lf", "failedfirst" - self.active = any(config.getoption(key) for key in active_keys) - self.lastfailed = config.cache.get("cache/lastfailed", {}) - self._previously_failed_count = None - self._report_status = None - self._skipped_files = 0 # count skipped files during collection due to --lf - - def last_failed_paths(self): - """Returns a set with all Paths()s of the previously failed nodeids (cached). - """ - try: - return self._last_failed_paths - except AttributeError: - rootpath = Path(self.config.rootdir) - result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed} - result = {x for x in result if x.exists()} - self._last_failed_paths = result - return result - - def pytest_ignore_collect(self, path): - """ - Ignore this file path if we are in --lf mode and it is not in the list of - previously failed files. 
- """ - if self.active and self.config.getoption("lf") and path.isfile(): - last_failed_paths = self.last_failed_paths() - if last_failed_paths: - skip_it = Path(path) not in self.last_failed_paths() - if skip_it: - self._skipped_files += 1 - return skip_it - - def pytest_report_collectionfinish(self): - if self.active and self.config.getoption("verbose") >= 0: - return "run-last-failure: %s" % self._report_status - - def pytest_runtest_logreport(self, report): - if (report.when == "call" and report.passed) or report.skipped: - self.lastfailed.pop(report.nodeid, None) - elif report.failed: - self.lastfailed[report.nodeid] = True - - def pytest_collectreport(self, report): - passed = report.outcome in ("passed", "skipped") - if passed: - if report.nodeid in self.lastfailed: - self.lastfailed.pop(report.nodeid) - self.lastfailed.update((item.nodeid, True) for item in report.result) - else: - self.lastfailed[report.nodeid] = True - - def pytest_collection_modifyitems(self, session, config, items): - if not self.active: - return - - if self.lastfailed: - previously_failed = [] - previously_passed = [] - for item in items: - if item.nodeid in self.lastfailed: - previously_failed.append(item) - else: - previously_passed.append(item) - self._previously_failed_count = len(previously_failed) - - if not previously_failed: - # Running a subset of all tests with recorded failures - # only outside of it. - self._report_status = "%d known failures not in selected tests" % ( - len(self.lastfailed), - ) - else: - if self.config.getoption("lf"): - items[:] = previously_failed - config.hook.pytest_deselected(items=previously_passed) - else: # --failedfirst - items[:] = previously_failed + previously_passed - - noun = "failure" if self._previously_failed_count == 1 else "failures" - suffix = " first" if self.config.getoption("failedfirst") else "" - self._report_status = "rerun previous {count} {noun}{suffix}".format( - count=self._previously_failed_count, suffix=suffix, noun=noun - ) - - if self._skipped_files > 0: - files_noun = "file" if self._skipped_files == 1 else "files" - self._report_status += " (skipped {files} {files_noun})".format( - files=self._skipped_files, files_noun=files_noun - ) - else: - self._report_status = "no previously failed tests, " - if self.config.getoption("last_failed_no_failures") == "none": - self._report_status += "deselecting all items." - config.hook.pytest_deselected(items=items) - items[:] = [] - else: - self._report_status += "not deselecting items." 
- - def pytest_sessionfinish(self, session): - config = self.config - if config.getoption("cacheshow") or hasattr(config, "slaveinput"): - return - - saved_lastfailed = config.cache.get("cache/lastfailed", {}) - if saved_lastfailed != self.lastfailed: - config.cache.set("cache/lastfailed", self.lastfailed) - - -class NFPlugin(object): - """ Plugin which implements the --nf (run new-first) option """ - - def __init__(self, config): - self.config = config - self.active = config.option.newfirst - self.cached_nodeids = config.cache.get("cache/nodeids", []) - - def pytest_collection_modifyitems(self, session, config, items): - if self.active: - new_items = OrderedDict() - other_items = OrderedDict() - for item in items: - if item.nodeid not in self.cached_nodeids: - new_items[item.nodeid] = item - else: - other_items[item.nodeid] = item - - items[:] = self._get_increasing_order( - six.itervalues(new_items) - ) + self._get_increasing_order(six.itervalues(other_items)) - self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] - - def _get_increasing_order(self, items): - return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) - - def pytest_sessionfinish(self, session): - config = self.config - if config.getoption("cacheshow") or hasattr(config, "slaveinput"): - return - - config.cache.set("cache/nodeids", self.cached_nodeids) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--lf", - "--last-failed", - action="store_true", - dest="lf", - help="rerun only the tests that failed " - "at the last run (or all if none failed)", - ) - group.addoption( - "--ff", - "--failed-first", - action="store_true", - dest="failedfirst", - help="run all tests but run the last failures first. " - "This may re-order tests and thus lead to " - "repeated fixture setup/teardown", - ) - group.addoption( - "--nf", - "--new-first", - action="store_true", - dest="newfirst", - help="run tests from new files first, then the rest of the tests " - "sorted by file mtime", - ) - group.addoption( - "--cache-show", - action="append", - nargs="?", - dest="cacheshow", - help=( - "show cache contents, don't perform collection or tests. " - "Optional argument: glob (default: '*')." - ), - ) - group.addoption( - "--cache-clear", - action="store_true", - dest="cacheclear", - help="remove all cache contents at start of test run.", - ) - cache_dir_default = ".pytest_cache" - if "TOX_ENV_DIR" in os.environ: - cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) - parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.") - group.addoption( - "--lfnf", - "--last-failed-no-failures", - action="store", - dest="last_failed_no_failures", - choices=("all", "none"), - default="all", - help="which tests to run with no previously (known) failures.", - ) - - -def pytest_cmdline_main(config): - if config.option.cacheshow: - from _pytest.main import wrap_session - - return wrap_session(config, cacheshow) - - -@pytest.hookimpl(tryfirst=True) -def pytest_configure(config): - config.cache = Cache.for_config(config) - config.pluginmanager.register(LFPlugin(config), "lfplugin") - config.pluginmanager.register(NFPlugin(config), "nfplugin") - - -@pytest.fixture -def cache(request): - """ - Return a cache object that can persist state between testing sessions. 
- - cache.get(key, default) - cache.set(key, value) - - Keys must be a ``/`` separated value, where the first part is usually the - name of your plugin or application to avoid clashes with other cache users. - - Values can be any object handled by the json stdlib module. - """ - return request.config.cache - - -def pytest_report_header(config): - """Display cachedir with --cache-show and if non-default.""" - if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": - cachedir = config.cache._cachedir - # TODO: evaluate generating upward relative paths - # starting with .., ../.. if sensible - - try: - displaypath = cachedir.relative_to(config.rootdir) - except ValueError: - displaypath = cachedir - return "cachedir: {}".format(displaypath) - - -def cacheshow(config, session): - from pprint import pformat - - tw = py.io.TerminalWriter() - tw.line("cachedir: " + str(config.cache._cachedir)) - if not config.cache._cachedir.is_dir(): - tw.line("cache is empty") - return 0 - - glob = config.option.cacheshow[0] - if glob is None: - glob = "*" - - dummy = object() - basedir = config.cache._cachedir - vdir = basedir / "v" - tw.sep("-", "cache values for %r" % glob) - for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): - key = valpath.relative_to(vdir) - val = config.cache.get(key, dummy) - if val is dummy: - tw.line("%s contains unreadable content, will be ignored" % key) - else: - tw.line("%s contains:" % key) - for line in pformat(val).splitlines(): - tw.line(" " + line) - - ddir = basedir / "d" - if ddir.is_dir(): - contents = sorted(ddir.rglob(glob)) - tw.sep("-", "cache directories for %r" % glob) - for p in contents: - # if p.check(dir=1): - # print("%s/" % p.relto(basedir)) - if p.is_file(): - key = p.relative_to(basedir) - tw.line("{} is a file of length {:d}".format(key, p.stat().st_size)) - return 0 diff --git a/contrib/python/pytest/py2/_pytest/capture.py b/contrib/python/pytest/py2/_pytest/capture.py deleted file mode 100644 index 68c17772f3..0000000000 --- a/contrib/python/pytest/py2/_pytest/capture.py +++ /dev/null @@ -1,850 +0,0 @@ -# -*- coding: utf-8 -*- -""" -per-test stdout/stderr capturing mechanism. 
- -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import contextlib -import io -import os -import sys -from io import UnsupportedOperation -from tempfile import TemporaryFile - -import six - -import pytest -from _pytest.compat import _PY3 -from _pytest.compat import CaptureIO - -patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "--capture", - action="store", - default="fd" if hasattr(os, "dup") else "sys", - metavar="method", - choices=["fd", "sys", "no"], - help="per-test capturing method: one of fd|sys|no.", - ) - group._addoption( - "-s", - action="store_const", - const="no", - dest="capture", - help="shortcut for --capture=no.", - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_load_initial_conftests(early_config, parser, args): - ns = early_config.known_args_namespace - if ns.capture == "fd": - _py36_windowsconsoleio_workaround(sys.stdout) - _colorama_workaround() - _readline_workaround() - pluginmanager = early_config.pluginmanager - capman = CaptureManager(ns.capture) - pluginmanager.register(capman, "capturemanager") - - # make sure that capturemanager is properly reset at final shutdown - early_config.add_cleanup(capman.stop_global_capturing) - - # finally trigger conftest loading but while capturing (issue93) - capman.start_global_capturing() - outcome = yield - capman.suspend_global_capture() - if outcome.excinfo is not None: - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - -class CaptureManager(object): - """ - Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each - test phase (setup, call, teardown). After each of those points, the captured output is obtained and - attached to the collection/runtest report. - - There are two levels of capture: - * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled - during collection and each test phase. - * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this - case special handling is needed to ensure the fixtures take precedence over the global capture. 
- """ - - def __init__(self, method): - self._method = method - self._global_capturing = None - self._current_item = None - - def __repr__(self): - return "<CaptureManager _method=%r _global_capturing=%r _current_item=%r>" % ( - self._method, - self._global_capturing, - self._current_item, - ) - - def _getcapture(self, method): - if method == "fd": - return MultiCapture(out=True, err=True, Capture=FDCapture) - elif method == "sys": - return MultiCapture(out=True, err=True, Capture=SysCapture) - elif method == "no": - return MultiCapture(out=False, err=False, in_=False) - raise ValueError("unknown capturing method: %r" % method) # pragma: no cover - - def is_capturing(self): - if self.is_globally_capturing(): - return "global" - capture_fixture = getattr(self._current_item, "_capture_fixture", None) - if capture_fixture is not None: - return ( - "fixture %s" % self._current_item._capture_fixture.request.fixturename - ) - return False - - # Global capturing control - - def is_globally_capturing(self): - return self._method != "no" - - def start_global_capturing(self): - assert self._global_capturing is None - self._global_capturing = self._getcapture(self._method) - self._global_capturing.start_capturing() - - def stop_global_capturing(self): - if self._global_capturing is not None: - self._global_capturing.pop_outerr_to_orig() - self._global_capturing.stop_capturing() - self._global_capturing = None - - def resume_global_capture(self): - # During teardown of the python process, and on rare occasions, capture - # attributes can be `None` while trying to resume global capture. - if self._global_capturing is not None: - self._global_capturing.resume_capturing() - - def suspend_global_capture(self, in_=False): - cap = getattr(self, "_global_capturing", None) - if cap is not None: - cap.suspend_capturing(in_=in_) - - def suspend(self, in_=False): - # Need to undo local capsys-et-al if it exists before disabling global capture. - self.suspend_fixture(self._current_item) - self.suspend_global_capture(in_) - - def resume(self): - self.resume_global_capture() - self.resume_fixture(self._current_item) - - def read_global_capture(self): - return self._global_capturing.readouterr() - - # Fixture Control (it's just forwarding, think about removing this later) - - def activate_fixture(self, item): - """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over - the global capture. 
- """ - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._start() - - def deactivate_fixture(self, item): - """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture.close() - - def suspend_fixture(self, item): - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._suspend() - - def resume_fixture(self, item): - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._resume() - - # Helper context managers - - @contextlib.contextmanager - def global_and_fixture_disabled(self): - """Context manager to temporarily disable global and current fixture capturing.""" - self.suspend() - try: - yield - finally: - self.resume() - - @contextlib.contextmanager - def item_capture(self, when, item): - self.resume_global_capture() - self.activate_fixture(item) - try: - yield - finally: - self.deactivate_fixture(item) - self.suspend_global_capture(in_=False) - - out, err = self.read_global_capture() - item.add_report_section(when, "stdout", out) - item.add_report_section(when, "stderr", err) - - # Hooks - - @pytest.hookimpl(hookwrapper=True) - def pytest_make_collect_report(self, collector): - if isinstance(collector, pytest.File): - self.resume_global_capture() - outcome = yield - self.suspend_global_capture() - out, err = self.read_global_capture() - rep = outcome.get_result() - if out: - rep.sections.append(("Captured stdout", out)) - if err: - rep.sections.append(("Captured stderr", err)) - else: - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_protocol(self, item): - self._current_item = item - yield - self._current_item = None - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_setup(self, item): - with self.item_capture("setup", item): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_call(self, item): - with self.item_capture("call", item): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_teardown(self, item): - with self.item_capture("teardown", item): - yield - - @pytest.hookimpl(tryfirst=True) - def pytest_keyboard_interrupt(self, excinfo): - self.stop_global_capturing() - - @pytest.hookimpl(tryfirst=True) - def pytest_internalerror(self, excinfo): - self.stop_global_capturing() - - -capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} - - -def _ensure_only_one_capture_fixture(request, name): - fixtures = set(request.fixturenames) & capture_fixtures - {name} - if fixtures: - fixtures = sorted(fixtures) - fixtures = fixtures[0] if len(fixtures) == 1 else fixtures - raise request.raiseerror( - "cannot use {} and {} at the same time".format(fixtures, name) - ) - - -@pytest.fixture -def capsys(request): - """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. - - The captured output is made available via ``capsys.readouterr()`` method - calls, which return a ``(out, err)`` namedtuple. - ``out`` and ``err`` will be ``text`` objects. - """ - _ensure_only_one_capture_fixture(request, "capsys") - with _install_capture_fixture_on_item(request, SysCapture) as fixture: - yield fixture - - -@pytest.fixture -def capsysbinary(request): - """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. - - The captured output is made available via ``capsysbinary.readouterr()`` - method calls, which return a ``(out, err)`` namedtuple. - ``out`` and ``err`` will be ``bytes`` objects. 
- """ - _ensure_only_one_capture_fixture(request, "capsysbinary") - # Currently, the implementation uses the python3 specific `.buffer` - # property of CaptureIO. - if sys.version_info < (3,): - raise request.raiseerror("capsysbinary is only supported on Python 3") - with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: - yield fixture - - -@pytest.fixture -def capfd(request): - """Enable text capturing of writes to file descriptors ``1`` and ``2``. - - The captured output is made available via ``capfd.readouterr()`` method - calls, which return a ``(out, err)`` namedtuple. - ``out`` and ``err`` will be ``text`` objects. - """ - _ensure_only_one_capture_fixture(request, "capfd") - if not hasattr(os, "dup"): - pytest.skip( - "capfd fixture needs os.dup function which is not available in this system" - ) - with _install_capture_fixture_on_item(request, FDCapture) as fixture: - yield fixture - - -@pytest.fixture -def capfdbinary(request): - """Enable bytes capturing of writes to file descriptors ``1`` and ``2``. - - The captured output is made available via ``capfd.readouterr()`` method - calls, which return a ``(out, err)`` namedtuple. - ``out`` and ``err`` will be ``byte`` objects. - """ - _ensure_only_one_capture_fixture(request, "capfdbinary") - if not hasattr(os, "dup"): - pytest.skip( - "capfdbinary fixture needs os.dup function which is not available in this system" - ) - with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: - yield fixture - - -@contextlib.contextmanager -def _install_capture_fixture_on_item(request, capture_class): - """ - Context manager which creates a ``CaptureFixture`` instance and "installs" it on - the item/node of the given request. Used by ``capsys`` and ``capfd``. - - The CaptureFixture is added as attribute of the item because it needs to accessed - by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. - """ - request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) - capmanager = request.config.pluginmanager.getplugin("capturemanager") - # Need to active this fixture right away in case it is being used by another fixture (setup phase). - # If this fixture is being used only by a test function (call phase), then we wouldn't need this - # activation, but it doesn't hurt. - capmanager.activate_fixture(request.node) - yield fixture - fixture.close() - del request.node._capture_fixture - - -class CaptureFixture(object): - """ - Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` - fixtures. - """ - - def __init__(self, captureclass, request): - self.captureclass = captureclass - self.request = request - self._capture = None - self._captured_out = self.captureclass.EMPTY_BUFFER - self._captured_err = self.captureclass.EMPTY_BUFFER - - def _start(self): - if self._capture is None: - self._capture = MultiCapture( - out=True, err=True, in_=False, Capture=self.captureclass - ) - self._capture.start_capturing() - - def close(self): - if self._capture is not None: - out, err = self._capture.pop_outerr_to_orig() - self._captured_out += out - self._captured_err += err - self._capture.stop_capturing() - self._capture = None - - def readouterr(self): - """Read and return the captured output so far, resetting the internal buffer. 
- - :return: captured content as a namedtuple with ``out`` and ``err`` string attributes - """ - captured_out, captured_err = self._captured_out, self._captured_err - if self._capture is not None: - out, err = self._capture.readouterr() - captured_out += out - captured_err += err - self._captured_out = self.captureclass.EMPTY_BUFFER - self._captured_err = self.captureclass.EMPTY_BUFFER - return CaptureResult(captured_out, captured_err) - - def _suspend(self): - """Suspends this fixture's own capturing temporarily.""" - if self._capture is not None: - self._capture.suspend_capturing() - - def _resume(self): - """Resumes this fixture's own capturing temporarily.""" - if self._capture is not None: - self._capture.resume_capturing() - - @contextlib.contextmanager - def disabled(self): - """Temporarily disables capture while inside the 'with' block.""" - capmanager = self.request.config.pluginmanager.getplugin("capturemanager") - with capmanager.global_and_fixture_disabled(): - yield - - -def safe_text_dupfile(f, mode, default_encoding="UTF8"): - """ return an open text file object that's a duplicate of f on the - FD-level if possible. - """ - encoding = getattr(f, "encoding", None) - try: - fd = f.fileno() - except Exception: - if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): - # we seem to have a text stream, let's just use it - return f - else: - newfd = os.dup(fd) - if "b" not in mode: - mode += "b" - f = os.fdopen(newfd, mode, 0) # no buffering - return EncodedFile(f, encoding or default_encoding) - - -class EncodedFile(object): - errors = "strict" # possibly needed by py3 code (issue555) - - def __init__(self, buffer, encoding): - self.buffer = buffer - self.encoding = encoding - - def write(self, obj): - if isinstance(obj, six.text_type): - obj = obj.encode(self.encoding, "replace") - elif _PY3: - raise TypeError( - "write() argument must be str, not {}".format(type(obj).__name__) - ) - self.buffer.write(obj) - - def writelines(self, linelist): - data = "".join(linelist) - self.write(data) - - @property - def name(self): - """Ensure that file.name is a string.""" - return repr(self.buffer) - - @property - def mode(self): - return self.buffer.mode.replace("b", "") - - def __getattr__(self, name): - return getattr(object.__getattribute__(self, "buffer"), name) - - -CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) - - -class MultiCapture(object): - out = err = in_ = None - _state = None - - def __init__(self, out=True, err=True, in_=True, Capture=None): - if in_: - self.in_ = Capture(0) - if out: - self.out = Capture(1) - if err: - self.err = Capture(2) - - def __repr__(self): - return "<MultiCapture out=%r err=%r in_=%r _state=%r _in_suspended=%r>" % ( - self.out, - self.err, - self.in_, - self._state, - getattr(self, "_in_suspended", "<UNSET>"), - ) - - def start_capturing(self): - self._state = "started" - if self.in_: - self.in_.start() - if self.out: - self.out.start() - if self.err: - self.err.start() - - def pop_outerr_to_orig(self): - """ pop current snapshot out/err capture and flush to orig streams. 
""" - out, err = self.readouterr() - if out: - self.out.writeorg(out) - if err: - self.err.writeorg(err) - return out, err - - def suspend_capturing(self, in_=False): - self._state = "suspended" - if self.out: - self.out.suspend() - if self.err: - self.err.suspend() - if in_ and self.in_: - self.in_.suspend() - self._in_suspended = True - - def resume_capturing(self): - self._state = "resumed" - if self.out: - self.out.resume() - if self.err: - self.err.resume() - if hasattr(self, "_in_suspended"): - self.in_.resume() - del self._in_suspended - - def stop_capturing(self): - """ stop capturing and reset capturing streams """ - if self._state == "stopped": - raise ValueError("was already stopped") - self._state = "stopped" - if self.out: - self.out.done() - if self.err: - self.err.done() - if self.in_: - self.in_.done() - - def readouterr(self): - """ return snapshot unicode value of stdout/stderr capturings. """ - return CaptureResult( - self.out.snap() if self.out is not None else "", - self.err.snap() if self.err is not None else "", - ) - - -class NoCapture(object): - EMPTY_BUFFER = None - __init__ = start = done = suspend = resume = lambda *args: None - - -class FDCaptureBinary(object): - """Capture IO to/from a given os-level filedescriptor. - - snap() produces `bytes` - """ - - EMPTY_BUFFER = b"" - _state = None - - def __init__(self, targetfd, tmpfile=None): - self.targetfd = targetfd - try: - self.targetfd_save = os.dup(self.targetfd) - except OSError: - self.start = lambda: None - self.done = lambda: None - else: - if targetfd == 0: - assert not tmpfile, "cannot set tmpfile with stdin" - tmpfile = open(os.devnull, "r") - self.syscapture = SysCapture(targetfd) - else: - if tmpfile is None: - f = TemporaryFile() - with f: - tmpfile = safe_text_dupfile(f, mode="wb+") - if targetfd in patchsysdict: - self.syscapture = SysCapture(targetfd, tmpfile) - else: - self.syscapture = NoCapture() - self.tmpfile = tmpfile - self.tmpfile_fd = tmpfile.fileno() - - def __repr__(self): - return "<FDCapture %s oldfd=%s _state=%r>" % ( - self.targetfd, - getattr(self, "targetfd_save", None), - self._state, - ) - - def start(self): - """ Start capturing on targetfd using memorized tmpfile. """ - try: - os.fstat(self.targetfd_save) - except (AttributeError, OSError): - raise ValueError("saved filedescriptor not valid anymore") - os.dup2(self.tmpfile_fd, self.targetfd) - self.syscapture.start() - self._state = "started" - - def snap(self): - self.tmpfile.seek(0) - res = self.tmpfile.read() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - def done(self): - """ stop capturing, restore streams, return original capture file, - seeked to position zero. """ - targetfd_save = self.__dict__.pop("targetfd_save") - os.dup2(targetfd_save, self.targetfd) - os.close(targetfd_save) - self.syscapture.done() - _attempt_to_close_capture_file(self.tmpfile) - self._state = "done" - - def suspend(self): - self.syscapture.suspend() - os.dup2(self.targetfd_save, self.targetfd) - self._state = "suspended" - - def resume(self): - self.syscapture.resume() - os.dup2(self.tmpfile_fd, self.targetfd) - self._state = "resumed" - - def writeorg(self, data): - """ write to original file descriptor. """ - if isinstance(data, six.text_type): - data = data.encode("utf8") # XXX use encoding of original stream - os.write(self.targetfd_save, data) - - -class FDCapture(FDCaptureBinary): - """Capture IO to/from a given os-level filedescriptor. 
- - snap() produces text - """ - - EMPTY_BUFFER = str() - - def snap(self): - res = super(FDCapture, self).snap() - enc = getattr(self.tmpfile, "encoding", None) - if enc and isinstance(res, bytes): - res = six.text_type(res, enc, "replace") - return res - - -class SysCapture(object): - - EMPTY_BUFFER = str() - _state = None - - def __init__(self, fd, tmpfile=None): - name = patchsysdict[fd] - self._old = getattr(sys, name) - self.name = name - if tmpfile is None: - if name == "stdin": - tmpfile = DontReadFromInput() - else: - tmpfile = CaptureIO() - self.tmpfile = tmpfile - - def __repr__(self): - return "<SysCapture %s _old=%r, tmpfile=%r _state=%r>" % ( - self.name, - self._old, - self.tmpfile, - self._state, - ) - - def start(self): - setattr(sys, self.name, self.tmpfile) - self._state = "started" - - def snap(self): - res = self.tmpfile.getvalue() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - def done(self): - setattr(sys, self.name, self._old) - del self._old - _attempt_to_close_capture_file(self.tmpfile) - self._state = "done" - - def suspend(self): - setattr(sys, self.name, self._old) - self._state = "suspended" - - def resume(self): - setattr(sys, self.name, self.tmpfile) - self._state = "resumed" - - def writeorg(self, data): - self._old.write(data) - self._old.flush() - - -class SysCaptureBinary(SysCapture): - EMPTY_BUFFER = b"" - - def snap(self): - res = self.tmpfile.buffer.getvalue() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - -class DontReadFromInput(six.Iterator): - """Temporary stub class. Ideally when stdin is accessed, the - capturing should be turned off, with possibly all data captured - so far sent to the screen. This should be configurable, though, - because in automated test runs it is better to crash than - hang indefinitely. - """ - - encoding = None - - def read(self, *args): - raise IOError("reading from stdin while output is captured") - - readline = read - readlines = read - __next__ = read - - def __iter__(self): - return self - - def fileno(self): - raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") - - def isatty(self): - return False - - def close(self): - pass - - @property - def buffer(self): - if sys.version_info >= (3, 0): - return self - else: - raise AttributeError("redirected stdin has no attribute buffer") - - -def _colorama_workaround(): - """ - Ensure colorama is imported so that it attaches to the correct stdio - handles on Windows. - - colorama uses the terminal on import time. So if something does the - first import of colorama while I/O capture is active, colorama will - fail in various ways. - """ - if sys.platform.startswith("win32"): - try: - import colorama # noqa: F401 - except ImportError: - pass - - -def _readline_workaround(): - """ - Ensure readline is imported so that it attaches to the correct stdio - handles on Windows. - - Pdb uses readline support where available--when not running from the Python - prompt, the readline module is not imported until running the pdb REPL. If - running pytest with the --pdb option this means the readline module is not - imported until after I/O capture has been started. - - This is a problem for pyreadline, which is often used to implement readline - support on Windows, as it does not attach to the correct handles for stdout - and/or stdin if they have been redirected by the FDCapture mechanism. 
This - workaround ensures that readline is imported before I/O capture is setup so - that it can attach to the actual stdin/out for the console. - - See https://github.com/pytest-dev/pytest/pull/1281 - """ - if sys.platform.startswith("win32"): - try: - import readline # noqa: F401 - except ImportError: - pass - - -def _py36_windowsconsoleio_workaround(stream): - """ - Python 3.6 implemented unicode console handling for Windows. This works - by reading/writing to the raw console handle using - ``{Read,Write}ConsoleW``. - - The problem is that we are going to ``dup2`` over the stdio file - descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the - handles used by Python to write to the console. Though there is still some - weirdness and the console handle seems to only be closed randomly and not - on the first call to ``CloseHandle``, or maybe it gets reopened with the - same handle value when we suspend capturing. - - The workaround in this case will reopen stdio with a different fd which - also means a different handle by replicating the logic in - "Py_lifecycle.c:initstdio/create_stdio". - - :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given - here as parameter for unittesting purposes. - - See https://github.com/pytest-dev/py/issues/103 - """ - if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): - return - - # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) - if not hasattr(stream, "buffer"): - return - - buffered = hasattr(stream.buffer, "raw") - raw_stdout = stream.buffer.raw if buffered else stream.buffer - - if not isinstance(raw_stdout, io._WindowsConsoleIO): - return - - def _reopen_stdio(f, mode): - if not buffered and mode[0] == "w": - buffering = 0 - else: - buffering = -1 - - return io.TextIOWrapper( - open(os.dup(f.fileno()), mode, buffering), - f.encoding, - f.errors, - f.newlines, - f.line_buffering, - ) - - sys.stdin = _reopen_stdio(sys.stdin, "rb") - sys.stdout = _reopen_stdio(sys.stdout, "wb") - sys.stderr = _reopen_stdio(sys.stderr, "wb") - - -def _attempt_to_close_capture_file(f): - """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" - if six.PY2: - try: - f.close() - except IOError: - pass - else: - f.close() diff --git a/contrib/python/pytest/py2/_pytest/compat.py b/contrib/python/pytest/py2/_pytest/compat.py deleted file mode 100644 index e5c3b84667..0000000000 --- a/contrib/python/pytest/py2/_pytest/compat.py +++ /dev/null @@ -1,473 +0,0 @@ -# -*- coding: utf-8 -*- -""" -python version compatibility code -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import codecs -import functools -import inspect -import re -import sys -from contextlib import contextmanager - -import attr -import py -import six -from six import text_type - -import _pytest -from _pytest._io.saferepr import saferepr -from _pytest.outcomes import fail -from _pytest.outcomes import TEST_OUTCOME - -try: - import enum -except ImportError: # pragma: no cover - # Only available in Python 3.4+ or as a backport - enum = None - -_PY3 = sys.version_info > (3, 0) -_PY2 = not _PY3 - - -if _PY3: - from inspect import signature, Parameter as Parameter -else: - from funcsigs import signature, Parameter as Parameter - -NOTSET = object() - -PY35 = sys.version_info[:2] >= (3, 5) -PY36 = sys.version_info[:2] >= (3, 6) -MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" - - -if _PY3: - from 
collections.abc import MutableMapping as MappingMixin - from collections.abc import Iterable, Mapping, Sequence, Sized -else: - # those raise DeprecationWarnings in Python >=3.7 - from collections import MutableMapping as MappingMixin # noqa - from collections import Iterable, Mapping, Sequence, Sized # noqa - - -if sys.version_info >= (3, 4): - from importlib.util import spec_from_file_location -else: - - def spec_from_file_location(*_, **__): - return None - - -if sys.version_info >= (3, 8): - from importlib import metadata as importlib_metadata # noqa -else: - import importlib_metadata # noqa - - -def _format_args(func): - return str(signature(func)) - - -isfunction = inspect.isfunction -isclass = inspect.isclass -# used to work around a python2 exception info leak -exc_clear = getattr(sys, "exc_clear", lambda: None) -# The type of re.compile objects is not exposed in Python. -REGEX_TYPE = type(re.compile("")) - - -def is_generator(func): - genfunc = inspect.isgeneratorfunction(func) - return genfunc and not iscoroutinefunction(func) - - -def iscoroutinefunction(func): - """Return True if func is a decorated coroutine function. - - Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, - which in turns also initializes the "logging" module as side-effect (see issue #8). - """ - return getattr(func, "_is_coroutine", False) or ( - hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func) - ) - - -def getlocation(function, curdir): - function = get_real_func(function) - fn = py.path.local(inspect.getfile(function)) - lineno = function.__code__.co_firstlineno - if fn.relto(curdir): - fn = fn.relto(curdir) - return "%s:%d" % (fn, lineno + 1) - - -def num_mock_patch_args(function): - """ return number of arguments used up by mock arguments (if any) """ - patchings = getattr(function, "patchings", None) - if not patchings: - return 0 - mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] - if any(mock_modules): - sentinels = [m.DEFAULT for m in mock_modules if m is not None] - return len( - [p for p in patchings if not p.attribute_name and p.new in sentinels] - ) - return len(patchings) - - -def getfuncargnames(function, is_method=False, cls=None): - """Returns the names of a function's mandatory arguments. - - This should return the names of all function arguments that: - * Aren't bound to an instance or type as in instance or class methods. - * Don't have default values. - * Aren't bound with functools.partial. - * Aren't replaced with mocks. - - The is_method and cls arguments indicate that the function should - be treated as a bound method even though it's not unless, only in - the case of cls, the function is a static method. - - @RonnyPfannschmidt: This function should be refactored when we - revisit fixtures. The fixture mechanism should ask the node for - the fixture names, and not try to obtain directly from the - function object well after collection has occurred. - - """ - # The parameters attribute of a Signature object contains an - # ordered mapping of parameter names to Parameter instances. This - # creates a tuple of the names of the parameters that don't have - # defaults. 
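    # Illustrative aside (not part of the original source): assuming
    #     def f(self, a, b=1, *, c, d=2): ...
    # signature(f).parameters maps self, a, b, c, d to Parameter objects; the
    # comprehension below keeps only the positional-or-keyword and keyword-only
    # names that have no default, i.e. ("self", "a", "c"), and the is_method/cls
    # handling further down then drops the leading "self".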
- try: - parameters = signature(function).parameters - except (ValueError, TypeError) as e: - fail( - "Could not determine arguments of {!r}: {}".format(function, e), - pytrace=False, - ) - - arg_names = tuple( - p.name - for p in parameters.values() - if ( - p.kind is Parameter.POSITIONAL_OR_KEYWORD - or p.kind is Parameter.KEYWORD_ONLY - ) - and p.default is Parameter.empty - ) - # If this function should be treated as a bound method even though - # it's passed as an unbound method or function, remove the first - # parameter name. - if is_method or ( - cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) - ): - arg_names = arg_names[1:] - # Remove any names that will be replaced with mocks. - if hasattr(function, "__wrapped__"): - arg_names = arg_names[num_mock_patch_args(function) :] - return arg_names - - -@contextmanager -def dummy_context_manager(): - """Context manager that does nothing, useful in situations where you might need an actual context manager or not - depending on some condition. Using this allow to keep the same code""" - yield - - -def get_default_arg_names(function): - # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, - # to get the arguments which were excluded from its result because they had default values - return tuple( - p.name - for p in signature(function).parameters.values() - if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) - and p.default is not Parameter.empty - ) - - -_non_printable_ascii_translate_table = { - i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127) -} -_non_printable_ascii_translate_table.update( - {ord("\t"): u"\\t", ord("\r"): u"\\r", ord("\n"): u"\\n"} -) - - -def _translate_non_printable(s): - return s.translate(_non_printable_ascii_translate_table) - - -if _PY3: - STRING_TYPES = bytes, str - UNICODE_TYPES = six.text_type - - if PY35: - - def _bytes_to_ascii(val): - return val.decode("ascii", "backslashreplace") - - else: - - def _bytes_to_ascii(val): - if val: - # source: http://goo.gl/bGsnwC - encoded_bytes, _ = codecs.escape_encode(val) - return encoded_bytes.decode("ascii") - else: - # empty bytes crashes codecs.escape_encode (#1087) - return "" - - def ascii_escaped(val): - """If val is pure ascii, returns it as a str(). Otherwise, escapes - bytes objects into a sequence of escaped bytes: - - b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' - - and escapes unicode objects into a sequence of escaped unicode - ids, e.g.: - - '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' - - note: - the obvious "v.decode('unicode-escape')" will return - valid utf-8 unicode if it finds them in bytes, but we - want to return escaped bytes for any byte, even if they match - a utf-8 string. - - """ - if isinstance(val, bytes): - ret = _bytes_to_ascii(val) - else: - ret = val - return ret - - -else: - STRING_TYPES = six.string_types - UNICODE_TYPES = six.text_type - - def ascii_escaped(val): - """In py2 bytes and str are the same type, so return if it's a bytes - object, return it unchanged if it is a full ascii string, - otherwise escape it into its binary form. - - If it's a unicode string, change the unicode characters into - unicode escapes. 
- - """ - if isinstance(val, bytes): - try: - ret = val.decode("utf-8") - except UnicodeDecodeError: - ret = val.decode("utf-8", "ignore") - else: - ret = val.encode("utf-8", "replace").decode("utf-8") - return ret - - -class _PytestWrapper(object): - """Dummy wrapper around a function object for internal use only. - - Used to correctly unwrap the underlying function object - when we are creating fixtures, because we wrap the function object ourselves with a decorator - to issue warnings when the fixture function is called directly. - """ - - def __init__(self, obj): - self.obj = obj - - -def get_real_func(obj): - """ gets the real function object of the (possibly) wrapped object by - functools.wraps or functools.partial. - """ - start_obj = obj - for i in range(100): - # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function - # to trigger a warning if it gets called directly instead of by pytest: we don't - # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) - new_obj = getattr(obj, "__pytest_wrapped__", None) - if isinstance(new_obj, _PytestWrapper): - obj = new_obj.obj - break - new_obj = getattr(obj, "__wrapped__", None) - if new_obj is None: - break - obj = new_obj - else: - raise ValueError( - ("could not find real function of {start}\nstopped at {current}").format( - start=saferepr(start_obj), current=saferepr(obj) - ) - ) - if isinstance(obj, functools.partial): - obj = obj.func - return obj - - -def get_real_method(obj, holder): - """ - Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time - returning a bound method to ``holder`` if the original object was a bound method. - """ - try: - is_method = hasattr(obj, "__func__") - obj = get_real_func(obj) - except Exception: - return obj - if is_method and hasattr(obj, "__get__") and callable(obj.__get__): - obj = obj.__get__(holder) - return obj - - -def getfslineno(obj): - # xxx let decorators etc specify a sane ordering - obj = get_real_func(obj) - if hasattr(obj, "place_as"): - obj = obj.place_as - fslineno = _pytest._code.getfslineno(obj) - assert isinstance(fslineno[1], int), obj - return fslineno - - -def getimfunc(func): - try: - return func.__func__ - except AttributeError: - return func - - -def safe_getattr(object, name, default): - """ Like getattr but return default upon any Exception or any OutcomeException. - - Attribute access can potentially fail for 'evil' Python objects. - See issue #214. - It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException - instead of Exception (for more details check #2707) - """ - try: - return getattr(object, name, default) - except TEST_OUTCOME: - return default - - -def safe_isclass(obj): - """Ignore any exception via isinstance on Python 3.""" - try: - return isclass(obj) - except Exception: - return False - - -def _is_unittest_unexpected_success_a_failure(): - """Return if the test suite should fail if an @expectedFailure unittest test PASSES. - - From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: - Changed in version 3.4: Returns False if there were any - unexpectedSuccesses from tests marked with the expectedFailure() decorator. 
- """ - return sys.version_info >= (3, 4) - - -if _PY3: - - def safe_str(v): - """returns v as string""" - try: - return str(v) - except UnicodeEncodeError: - return str(v, encoding="utf-8") - - -else: - - def safe_str(v): - """returns v as string, converting to utf-8 if necessary""" - try: - return str(v) - except UnicodeError: - if not isinstance(v, text_type): - v = text_type(v) - errors = "replace" - return v.encode("utf-8", errors) - - -COLLECT_FAKEMODULE_ATTRIBUTES = ( - "Collector", - "Module", - "Function", - "Instance", - "Session", - "Item", - "Class", - "File", - "_fillfuncargs", -) - - -def _setup_collect_fakemodule(): - from types import ModuleType - import pytest - - pytest.collect = ModuleType("pytest.collect") - pytest.collect.__all__ = [] # used for setns - for attribute in COLLECT_FAKEMODULE_ATTRIBUTES: - setattr(pytest.collect, attribute, getattr(pytest, attribute)) - - -if _PY2: - # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. - from py.io import TextIO - - class CaptureIO(TextIO): - @property - def encoding(self): - return getattr(self, "_encoding", "UTF-8") - - -else: - import io - - class CaptureIO(io.TextIOWrapper): - def __init__(self): - super(CaptureIO, self).__init__( - io.BytesIO(), encoding="UTF-8", newline="", write_through=True - ) - - def getvalue(self): - return self.buffer.getvalue().decode("UTF-8") - - -class FuncargnamesCompatAttr(object): - """ helper class so that Metafunc, Function and FixtureRequest - don't need to each define the "funcargnames" compatibility attribute. - """ - - @property - def funcargnames(self): - """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" - return self.fixturenames - - -if six.PY2: - - def lru_cache(*_, **__): - def dec(fn): - return fn - - return dec - - -else: - from functools import lru_cache # noqa: F401 - - -if getattr(attr, "__version_info__", ()) >= (19, 2): - ATTRS_EQ_FIELD = "eq" -else: - ATTRS_EQ_FIELD = "cmp" diff --git a/contrib/python/pytest/py2/_pytest/config/__init__.py b/contrib/python/pytest/py2/_pytest/config/__init__.py deleted file mode 100644 index 0737ff9d51..0000000000 --- a/contrib/python/pytest/py2/_pytest/config/__init__.py +++ /dev/null @@ -1,1203 +0,0 @@ -# -*- coding: utf-8 -*- -""" command line options, ini-file and conftest.py processing. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import copy -import inspect -import os -import shlex -import sys -import types -import warnings - -import attr -import py -import six -from packaging.version import Version -from pluggy import HookimplMarker -from pluggy import HookspecMarker -from pluggy import PluginManager - -import _pytest._code -import _pytest.assertion -import _pytest.hookspec # the extension point definitions -from .exceptions import PrintHelp -from .exceptions import UsageError -from .findpaths import determine_setup -from .findpaths import exists -from _pytest import deprecated -from _pytest._code import ExceptionInfo -from _pytest._code import filter_traceback -from _pytest.compat import importlib_metadata -from _pytest.compat import lru_cache -from _pytest.compat import safe_str -from _pytest.outcomes import fail -from _pytest.outcomes import Skipped -from _pytest.pathlib import Path -from _pytest.warning_types import PytestConfigWarning - -hookimpl = HookimplMarker("pytest") -hookspec = HookspecMarker("pytest") - - -class ConftestImportFailure(Exception): - def __init__(self, path, excinfo): - Exception.__init__(self, path, excinfo) - self.path = path - self.excinfo = excinfo - - -def main(args=None, plugins=None): - """ return exit code, after performing an in-process test run. - - :arg args: list of command line arguments. - - :arg plugins: list of plugin objects to be auto-registered during - initialization. - """ - from _pytest.main import EXIT_USAGEERROR - - try: - try: - config = _prepareconfig(args, plugins) - except ConftestImportFailure as e: - exc_info = ExceptionInfo(e.excinfo) - tw = py.io.TerminalWriter(sys.stderr) - tw.line( - "ImportError while loading conftest '{e.path}'.".format(e=e), red=True - ) - exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = ( - exc_info.getrepr(style="short", chain=False) - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = safe_str(exc_repr) - for line in formatted_tb.splitlines(): - tw.line(line.rstrip(), red=True) - return 4 - else: - try: - return config.hook.pytest_cmdline_main(config=config) - finally: - config._ensure_unconfigure() - except UsageError as e: - tw = py.io.TerminalWriter(sys.stderr) - for msg in e.args: - tw.line("ERROR: {}\n".format(msg), red=True) - return EXIT_USAGEERROR - - -class cmdline(object): # compatibility namespace - main = staticmethod(main) - - -def filename_arg(path, optname): - """ Argparse type validator for filename arguments. - - :path: path of filename - :optname: name of the option - """ - if os.path.isdir(path): - raise UsageError("{} must be a filename, given: {}".format(optname, path)) - return path - - -def directory_arg(path, optname): - """Argparse type validator for directory arguments. - - :path: path of directory - :optname: name of the option - """ - if not os.path.isdir(path): - raise UsageError("{} must be a directory, given: {}".format(optname, path)) - return path - - -# Plugins that cannot be disabled via "-p no:X" currently. -essential_plugins = ( - "mark", - "main", - "runner", - "fixtures", - "helpconfig", # Provides -p. 
-) - -default_plugins = essential_plugins + ( - "python", - "terminal", - "debugging", - "unittest", - "capture", - "skipping", - "tmpdir", - "monkeypatch", - "recwarn", - "pastebin", - "nose", - "assertion", - "junitxml", - "resultlog", - "doctest", - "cacheprovider", - "freeze_support", - "setuponly", - "setupplan", - "stepwise", - "warnings", - "logging", - "reports", -) - -builtin_plugins = set(default_plugins) -builtin_plugins.add("pytester") - - -def get_config(args=None, plugins=None): - # subsequent calls to main will create a fresh instance - pluginmanager = PytestPluginManager() - config = Config( - pluginmanager, - invocation_params=Config.InvocationParams( - args=args, plugins=plugins, dir=Path().resolve() - ), - ) - - if args is not None: - # Handle any "-p no:plugin" args. - pluginmanager.consider_preparse(args) - - for spec in default_plugins: - pluginmanager.import_plugin(spec) - return config - - -def get_plugin_manager(): - """ - Obtain a new instance of the - :py:class:`_pytest.config.PytestPluginManager`, with default plugins - already loaded. - - This function can be used by integration with other tools, like hooking - into pytest to run tests into an IDE. - """ - return get_config().pluginmanager - - -def _prepareconfig(args=None, plugins=None): - warning = None - if args is None: - args = sys.argv[1:] - elif isinstance(args, py.path.local): - args = [str(args)] - elif not isinstance(args, (tuple, list)): - msg = "`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})" - raise TypeError(msg.format(args, type(args))) - - config = get_config(args, plugins) - pluginmanager = config.pluginmanager - try: - if plugins: - for plugin in plugins: - if isinstance(plugin, six.string_types): - pluginmanager.consider_pluginarg(plugin) - else: - pluginmanager.register(plugin) - if warning: - from _pytest.warnings import _issue_warning_captured - - _issue_warning_captured(warning, hook=config.hook, stacklevel=4) - return pluginmanager.hook.pytest_cmdline_parse( - pluginmanager=pluginmanager, args=args - ) - except BaseException: - config._ensure_unconfigure() - raise - - -class PytestPluginManager(PluginManager): - """ - Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific - functionality: - - * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and - ``pytest_plugins`` global variables found in plugins being loaded; - * ``conftest.py`` loading during start-up; - """ - - def __init__(self): - super(PytestPluginManager, self).__init__("pytest") - self._conftest_plugins = set() - - # state related to local conftest plugins - self._dirpath2confmods = {} - self._conftestpath2mod = {} - self._confcutdir = None - self._noconftest = False - self._duplicatepaths = set() - - self.add_hookspecs(_pytest.hookspec) - self.register(self) - if os.environ.get("PYTEST_DEBUG"): - err = sys.stderr - encoding = getattr(err, "encoding", "utf8") - try: - err = py.io.dupfile(err, encoding=encoding) - except Exception: - pass - self.trace.root.setwriter(err.write) - self.enable_tracing() - - # Config._consider_importhook will set a real object if required. - self.rewrite_hook = _pytest.assertion.DummyRewriteHook() - # Used to know when we are importing conftests after the pytest_configure stage - self._configured = False - - def addhooks(self, module_or_class): - """ - .. deprecated:: 2.8 - - Use :py:meth:`pluggy.PluginManager.add_hookspecs <PluginManager.add_hookspecs>` - instead. 
- """ - warnings.warn(deprecated.PLUGIN_MANAGER_ADDHOOKS, stacklevel=2) - return self.add_hookspecs(module_or_class) - - def parse_hookimpl_opts(self, plugin, name): - # pytest hooks are always prefixed with pytest_ - # so we avoid accessing possibly non-readable attributes - # (see issue #1073) - if not name.startswith("pytest_"): - return - # ignore names which can not be hooks - if name == "pytest_plugins": - return - - method = getattr(plugin, name) - opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) - - # consider only actual functions for hooks (#3775) - if not inspect.isroutine(method): - return - - # collect unmarked hooks as long as they have the `pytest_' prefix - if opts is None and name.startswith("pytest_"): - opts = {} - if opts is not None: - # TODO: DeprecationWarning, people should use hookimpl - # https://github.com/pytest-dev/pytest/issues/4562 - known_marks = {m.name for m in getattr(method, "pytestmark", [])} - - for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): - opts.setdefault(name, hasattr(method, name) or name in known_marks) - return opts - - def parse_hookspec_opts(self, module_or_class, name): - opts = super(PytestPluginManager, self).parse_hookspec_opts( - module_or_class, name - ) - if opts is None: - method = getattr(module_or_class, name) - - if name.startswith("pytest_"): - # todo: deprecate hookspec hacks - # https://github.com/pytest-dev/pytest/issues/4562 - known_marks = {m.name for m in getattr(method, "pytestmark", [])} - opts = { - "firstresult": hasattr(method, "firstresult") - or "firstresult" in known_marks, - "historic": hasattr(method, "historic") - or "historic" in known_marks, - } - return opts - - def register(self, plugin, name=None): - if name in ["pytest_catchlog", "pytest_capturelog"]: - warnings.warn( - PytestConfigWarning( - "{} plugin has been merged into the core, " - "please remove it from your requirements.".format( - name.replace("_", "-") - ) - ) - ) - return - ret = super(PytestPluginManager, self).register(plugin, name) - if ret: - self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self) - ) - - if isinstance(plugin, types.ModuleType): - self.consider_module(plugin) - return ret - - def getplugin(self, name): - # support deprecated naming because plugins (xdist e.g.) use it - return self.get_plugin(name) - - def hasplugin(self, name): - """Return True if the plugin with the given name is registered.""" - return bool(self.get_plugin(name)) - - def pytest_configure(self, config): - # XXX now that the pluginmanager exposes hookimpl(tryfirst...) - # we should remove tryfirst/trylast as markers - config.addinivalue_line( - "markers", - "tryfirst: mark a hook implementation function such that the " - "plugin machinery will try to call it first/as early as possible.", - ) - config.addinivalue_line( - "markers", - "trylast: mark a hook implementation function such that the " - "plugin machinery will try to call it last/as late as possible.", - ) - self._configured = True - - # - # internal API for local conftest plugin handling - # - def _set_initial_conftests(self, namespace): - """ load initial conftest files given a preparsed "namespace". - As conftest files may add their own command line options - which have arguments ('--my-opt somepath') we might get some - false positives. All builtin and 3rd party plugins will have - been loaded, however, so common options will not confuse our logic - here. 
- """ - current = py.path.local() - self._confcutdir = ( - current.join(namespace.confcutdir, abs=True) - if namespace.confcutdir - else None - ) - self._noconftest = namespace.noconftest - self._using_pyargs = namespace.pyargs - testpaths = namespace.file_or_dir - foundanchor = False - for path in testpaths: - path = str(path) - # remove node-id syntax - i = path.find("::") - if i != -1: - path = path[:i] - anchor = current.join(path, abs=1) - if exists(anchor): # we found some file object - self._try_load_conftest(anchor) - foundanchor = True - if not foundanchor: - self._try_load_conftest(current) - - def _try_load_conftest(self, anchor): - self._getconftestmodules(anchor) - # let's also consider test* subdirs - if anchor.check(dir=1): - for x in anchor.listdir("test*"): - if x.check(dir=1): - self._getconftestmodules(x) - - @lru_cache(maxsize=128) - def _getconftestmodules(self, path): - if self._noconftest: - return [] - - if path.isfile(): - directory = path.dirpath() - else: - directory = path - - if six.PY2: # py2 is not using lru_cache. - try: - return self._dirpath2confmods[directory] - except KeyError: - pass - - # XXX these days we may rather want to use config.rootdir - # and allow users to opt into looking into the rootdir parent - # directories instead of requiring to specify confcutdir - clist = [] - for parent in directory.realpath().parts(): - if self._confcutdir and self._confcutdir.relto(parent): - continue - conftestpath = parent.join("conftest.py") - if conftestpath.isfile(): - # Use realpath to avoid loading the same conftest twice - # with build systems that create build directories containing - # symlinks to actual files. - mod = self._importconftest(conftestpath.realpath()) - clist.append(mod) - self._dirpath2confmods[directory] = clist - return clist - - def _rget_with_confmod(self, name, path): - modules = self._getconftestmodules(path) - for mod in reversed(modules): - try: - return mod, getattr(mod, name) - except AttributeError: - continue - raise KeyError(name) - - def _importconftest(self, conftestpath): - try: - return self._conftestpath2mod[conftestpath] - except KeyError: - pkgpath = conftestpath.pypkgpath() - if pkgpath is None: - _ensure_removed_sysmodule(conftestpath.purebasename) - try: - mod = conftestpath.pyimport() - if ( - hasattr(mod, "pytest_plugins") - and self._configured - and not self._using_pyargs - ): - from _pytest.deprecated import ( - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST, - ) - - fail( - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST.format( - conftestpath, self._confcutdir - ), - pytrace=False, - ) - except Exception: - raise ConftestImportFailure(conftestpath, sys.exc_info()) - - self._conftest_plugins.add(mod) - self._conftestpath2mod[conftestpath] = mod - dirpath = conftestpath.dirpath() - if dirpath in self._dirpath2confmods: - for path, mods in self._dirpath2confmods.items(): - if path and path.relto(dirpath) or path == dirpath: - assert mod not in mods - mods.append(mod) - self.trace("loaded conftestmodule %r" % (mod)) - self.consider_conftest(mod) - return mod - - # - # API for bootstrapping plugin loading - # - # - - def consider_preparse(self, args): - i = 0 - n = len(args) - while i < n: - opt = args[i] - i += 1 - if isinstance(opt, six.string_types): - if opt == "-p": - try: - parg = args[i] - except IndexError: - return - i += 1 - elif opt.startswith("-p"): - parg = opt[2:] - else: - continue - self.consider_pluginarg(parg) - - def consider_pluginarg(self, arg): - if arg.startswith("no:"): - name = arg[3:] - if name in 
essential_plugins: - raise UsageError("plugin %s cannot be disabled" % name) - - # PR #4304 : remove stepwise if cacheprovider is blocked - if name == "cacheprovider": - self.set_blocked("stepwise") - self.set_blocked("pytest_stepwise") - - self.set_blocked(name) - if not name.startswith("pytest_"): - self.set_blocked("pytest_" + name) - else: - name = arg - # Unblock the plugin. None indicates that it has been blocked. - # There is no interface with pluggy for this. - if self._name2plugin.get(name, -1) is None: - del self._name2plugin[name] - if not name.startswith("pytest_"): - if self._name2plugin.get("pytest_" + name, -1) is None: - del self._name2plugin["pytest_" + name] - self.import_plugin(arg, consider_entry_points=True) - - def consider_conftest(self, conftestmodule): - self.register(conftestmodule, name=conftestmodule.__file__) - - def consider_env(self): - self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) - - def consider_module(self, mod): - self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) - - def _import_plugin_specs(self, spec): - plugins = _get_plugin_specs_as_list(spec) - for import_spec in plugins: - self.import_plugin(import_spec) - - def import_plugin(self, modname, consider_entry_points=False): - """ - Imports a plugin with ``modname``. If ``consider_entry_points`` is True, entry point - names are also considered to find a plugin. - """ - # most often modname refers to builtin modules, e.g. "pytester", - # "terminal" or "capture". Those plugins are registered under their - # basename for historic purposes but must be imported with the - # _pytest prefix. - assert isinstance(modname, six.string_types), ( - "module name as text required, got %r" % modname - ) - modname = str(modname) - if self.is_blocked(modname) or self.get_plugin(modname) is not None: - return - - importspec = "_pytest." + modname if modname in builtin_plugins else modname - self.rewrite_hook.mark_rewrite(importspec) - - if consider_entry_points: - loaded = self.load_setuptools_entrypoints("pytest11", name=modname) - if loaded: - return - - try: - __import__(importspec) - except ImportError as e: - new_exc_message = 'Error importing plugin "%s": %s' % ( - modname, - safe_str(e.args[0]), - ) - new_exc = ImportError(new_exc_message) - tb = sys.exc_info()[2] - - six.reraise(ImportError, new_exc, tb) - - except Skipped as e: - from _pytest.warnings import _issue_warning_captured - - _issue_warning_captured( - PytestConfigWarning("skipped plugin %r: %s" % (modname, e.msg)), - self.hook, - stacklevel=1, - ) - else: - mod = sys.modules[importspec] - self.register(mod, modname) - - -def _get_plugin_specs_as_list(specs): - """ - Parses a list of "plugin specs" and returns a list of plugin names. - - Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in - which case it is returned as a list. Specs can also be `None` in which case an - empty list is returned. - """ - if specs is not None and not isinstance(specs, types.ModuleType): - if isinstance(specs, six.string_types): - specs = specs.split(",") if specs else [] - if not isinstance(specs, (list, tuple)): - raise UsageError( - "Plugin specs must be a ','-separated string or a " - "list/tuple of strings for plugin names. 
Given: %r" % specs - ) - return list(specs) - return [] - - -def _ensure_removed_sysmodule(modname): - try: - del sys.modules[modname] - except KeyError: - pass - - -class Notset(object): - def __repr__(self): - return "<NOTSET>" - - -notset = Notset() - - -def _iter_rewritable_modules(package_files): - """ - Given an iterable of file names in a source distribution, return the "names" that should - be marked for assertion rewrite (for example the package "pytest_mock/__init__.py" should - be added as "pytest_mock" in the assertion rewrite mechanism. - - This function has to deal with dist-info based distributions and egg based distributions - (which are still very much in use for "editable" installs). - - Here are the file names as seen in a dist-info based distribution: - - pytest_mock/__init__.py - pytest_mock/_version.py - pytest_mock/plugin.py - pytest_mock.egg-info/PKG-INFO - - Here are the file names as seen in an egg based distribution: - - src/pytest_mock/__init__.py - src/pytest_mock/_version.py - src/pytest_mock/plugin.py - src/pytest_mock.egg-info/PKG-INFO - LICENSE - setup.py - - We have to take in account those two distribution flavors in order to determine which - names should be considered for assertion rewriting. - - More information: - https://github.com/pytest-dev/pytest-mock/issues/167 - """ - package_files = list(package_files) - seen_some = False - for fn in package_files: - is_simple_module = "/" not in fn and fn.endswith(".py") - is_package = fn.count("/") == 1 and fn.endswith("__init__.py") - if is_simple_module: - module_name, _ = os.path.splitext(fn) - # we ignore "setup.py" at the root of the distribution - if module_name != "setup": - seen_some = True - yield module_name - elif is_package: - package_name = os.path.dirname(fn) - seen_some = True - yield package_name - - if not seen_some: - # at this point we did not find any packages or modules suitable for assertion - # rewriting, so we try again by stripping the first path component (to account for - # "src" based source trees for example) - # this approach lets us have the common case continue to be fast, as egg-distributions - # are rarer - new_package_files = [] - for fn in package_files: - parts = fn.split("/") - new_fn = "/".join(parts[1:]) - if new_fn: - new_package_files.append(new_fn) - if new_package_files: - for _module in _iter_rewritable_modules(new_package_files): - yield _module - - -class Config(object): - """ - Access to configuration values, pluginmanager and plugin hooks. - - :ivar PytestPluginManager pluginmanager: the plugin manager handles plugin registration and hook invocation. - - :ivar argparse.Namespace option: access to command line option as attributes. - - :ivar InvocationParams invocation_params: - - Object containing the parameters regarding the ``pytest.main`` - invocation. - Contains the followinig read-only attributes: - * ``args``: list of command-line arguments as passed to ``pytest.main()``. - * ``plugins``: list of extra plugins, might be None - * ``dir``: directory where ``pytest.main()`` was invoked from. - """ - - @attr.s(frozen=True) - class InvocationParams(object): - """Holds parameters passed during ``pytest.main()`` - - .. note:: - - Currently the environment variable PYTEST_ADDOPTS is also handled by - pytest implicitly, not being part of the invocation. - - Plugins accessing ``InvocationParams`` must be aware of that. 
- """ - - args = attr.ib() - plugins = attr.ib() - dir = attr.ib() - - def __init__(self, pluginmanager, invocation_params=None, *args): - from .argparsing import Parser, FILE_OR_DIR - - if invocation_params is None: - invocation_params = self.InvocationParams( - args=(), plugins=None, dir=Path().resolve() - ) - - #: access to command line option as attributes. - #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead - self.option = argparse.Namespace() - - self.invocation_params = invocation_params - - _a = FILE_OR_DIR - self._parser = Parser( - usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), - processopt=self._processopt, - ) - #: a pluginmanager instance - self.pluginmanager = pluginmanager - self.trace = self.pluginmanager.trace.root.get("config") - self.hook = self.pluginmanager.hook - self._inicache = {} - self._override_ini = () - self._opt2dest = {} - self._cleanup = [] - self.pluginmanager.register(self, "pytestconfig") - self._configured = False - self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) - - @property - def invocation_dir(self): - """Backward compatibility""" - return py.path.local(str(self.invocation_params.dir)) - - def add_cleanup(self, func): - """ Add a function to be called when the config object gets out of - use (usually coninciding with pytest_unconfigure).""" - self._cleanup.append(func) - - def _do_configure(self): - assert not self._configured - self._configured = True - self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) - - def _ensure_unconfigure(self): - if self._configured: - self._configured = False - self.hook.pytest_unconfigure(config=self) - self.hook.pytest_configure._call_history = [] - while self._cleanup: - fin = self._cleanup.pop() - fin() - - def get_terminal_writer(self): - return self.pluginmanager.get_plugin("terminalreporter")._tw - - def pytest_cmdline_parse(self, pluginmanager, args): - try: - self.parse(args) - except UsageError: - - # Handle --version and --help here in a minimal fashion. - # This gets done via helpconfig normally, but its - # pytest_cmdline_main is not called in case of errors. - if getattr(self.option, "version", False) or "--version" in args: - from _pytest.helpconfig import showversion - - showversion(self) - elif ( - getattr(self.option, "help", False) or "--help" in args or "-h" in args - ): - self._parser._getparser().print_help() - sys.stdout.write( - "\nNOTE: displaying only minimal help due to UsageError.\n\n" - ) - - raise - - return self - - def notify_exception(self, excinfo, option=None): - if option and getattr(option, "fulltrace", False): - style = "long" - else: - style = "native" - excrepr = excinfo.getrepr( - funcargs=True, showlocals=getattr(option, "showlocals", False), style=style - ) - res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) - if not any(res): - for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" % line) - sys.stderr.flush() - - def cwd_relative_nodeid(self, nodeid): - # nodeid's are relative to the rootpath, compute relative to cwd - if self.invocation_dir != self.rootdir: - fullpath = self.rootdir.join(nodeid) - nodeid = self.invocation_dir.bestrelpath(fullpath) - return nodeid - - @classmethod - def fromdictargs(cls, option_dict, args): - """ constructor useable for subprocesses. 
""" - config = get_config(args) - config.option.__dict__.update(option_dict) - config.parse(args, addopts=False) - for x in config.option.plugins: - config.pluginmanager.consider_pluginarg(x) - return config - - def _processopt(self, opt): - for name in opt._short_opts + opt._long_opts: - self._opt2dest[name] = opt.dest - - if hasattr(opt, "default") and opt.dest: - if not hasattr(self.option, opt.dest): - setattr(self.option, opt.dest, opt.default) - - @hookimpl(trylast=True) - def pytest_load_initial_conftests(self, early_config): - self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) - - def _initini(self, args): - ns, unknown_args = self._parser.parse_known_and_unknown_args( - args, namespace=copy.copy(self.option) - ) - r = determine_setup( - ns.inifilename, - ns.file_or_dir + unknown_args, - rootdir_cmd_arg=ns.rootdir or None, - config=self, - ) - self.rootdir, self.inifile, self.inicfg = r - self._parser.extra_info["rootdir"] = self.rootdir - self._parser.extra_info["inifile"] = self.inifile - self._parser.addini("addopts", "extra command line options", "args") - self._parser.addini("minversion", "minimally required pytest version") - self._override_ini = ns.override_ini or () - - def _consider_importhook(self, args): - """Install the PEP 302 import hook if using assertion rewriting. - - Needs to parse the --assert=<mode> option from the commandline - and find all the installed plugins to mark them for rewriting - by the importhook. - """ - ns, unknown_args = self._parser.parse_known_and_unknown_args(args) - mode = getattr(ns, "assertmode", "plain") - if mode == "rewrite": - try: - hook = _pytest.assertion.install_importhook(self) - except SystemError: - mode = "plain" - else: - self._mark_plugins_for_rewrite(hook) - _warn_about_missing_assertion(mode) - - def _mark_plugins_for_rewrite(self, hook): - """ - Given an importhook, mark for rewrite any top-level - modules or packages in the distribution package for - all pytest plugins. - """ - self.pluginmanager.rewrite_hook = hook - - if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): - # We don't autoload from setuptools entry points, no need to continue. - return - - package_files = ( - str(file) - for dist in importlib_metadata.distributions() - if any(ep.group == "pytest11" for ep in dist.entry_points) - for file in dist.files or [] - ) - - for name in _iter_rewritable_modules(package_files): - hook.mark_rewrite(name) - - def _validate_args(self, args, via): - """Validate known args.""" - self._parser._config_source_hint = via - try: - self._parser.parse_known_and_unknown_args( - args, namespace=copy.copy(self.option) - ) - finally: - del self._parser._config_source_hint - - return args - - def _preparse(self, args, addopts=True): - if addopts: - env_addopts = os.environ.get("PYTEST_ADDOPTS", "") - if len(env_addopts): - args[:] = ( - self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") - + args - ) - self._initini(args) - if addopts: - args[:] = ( - self._validate_args(self.getini("addopts"), "via addopts config") + args - ) - - self._checkversion() - self._consider_importhook(args) - self.pluginmanager.consider_preparse(args) - if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): - # Don't autoload from setuptools entry point. Only explicitly specified - # plugins are going to be loaded. 
- self.pluginmanager.load_setuptools_entrypoints("pytest11") - self.pluginmanager.consider_env() - self.known_args_namespace = ns = self._parser.parse_known_args( - args, namespace=copy.copy(self.option) - ) - if self.known_args_namespace.confcutdir is None and self.inifile: - confcutdir = py.path.local(self.inifile).dirname - self.known_args_namespace.confcutdir = confcutdir - try: - self.hook.pytest_load_initial_conftests( - early_config=self, args=args, parser=self._parser - ) - except ConftestImportFailure: - e = sys.exc_info()[1] - if ns.help or ns.version: - # we don't want to prevent --help/--version to work - # so just let is pass and print a warning at the end - from _pytest.warnings import _issue_warning_captured - - _issue_warning_captured( - PytestConfigWarning( - "could not load initial conftests: {}".format(e.path) - ), - self.hook, - stacklevel=2, - ) - else: - raise - - def _checkversion(self): - import pytest - - minver = self.inicfg.get("minversion", None) - if minver: - if Version(minver) > Version(pytest.__version__): - raise pytest.UsageError( - "%s:%d: requires pytest-%s, actual pytest-%s'" - % ( - self.inicfg.config.path, - self.inicfg.lineof("minversion"), - minver, - pytest.__version__, - ) - ) - - def parse(self, args, addopts=True): - # parse given cmdline arguments into this config object. - assert not hasattr( - self, "args" - ), "can only parse cmdline args at most once per Config object" - self._origargs = args - self.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=self.pluginmanager) - ) - self._preparse(args, addopts=addopts) - # XXX deprecated hook: - self.hook.pytest_cmdline_preparse(config=self, args=args) - self._parser.after_preparse = True - try: - args = self._parser.parse_setoption( - args, self.option, namespace=self.option - ) - if not args: - if self.invocation_dir == self.rootdir: - args = self.getini("testpaths") - if not args: - args = [str(self.invocation_dir)] - self.args = args - except PrintHelp: - pass - - def addinivalue_line(self, name, line): - """ add a line to an ini-file option. The option must have been - declared but might not yet be set in which case the line becomes the - the first line in its value. """ - x = self.getini(name) - assert isinstance(x, list) - x.append(line) # modifies the cached list inline - - def getini(self, name): - """ return configuration value from an :ref:`ini file <inifiles>`. If the - specified name hasn't been registered through a prior - :py:func:`parser.addini <_pytest.config.Parser.addini>` - call (usually from a plugin), a ValueError is raised. 
""" - try: - return self._inicache[name] - except KeyError: - self._inicache[name] = val = self._getini(name) - return val - - def _getini(self, name): - try: - description, type, default = self._parser._inidict[name] - except KeyError: - raise ValueError("unknown configuration value: %r" % (name,)) - value = self._get_override_ini_value(name) - if value is None: - try: - value = self.inicfg[name] - except KeyError: - if default is not None: - return default - if type is None: - return "" - return [] - if type == "pathlist": - dp = py.path.local(self.inicfg.config.path).dirpath() - values = [] - for relpath in shlex.split(value): - values.append(dp.join(relpath, abs=True)) - return values - elif type == "args": - return shlex.split(value) - elif type == "linelist": - return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] - elif type == "bool": - return bool(_strtobool(value.strip())) - else: - assert type is None - return value - - def _getconftest_pathlist(self, name, path): - try: - mod, relroots = self.pluginmanager._rget_with_confmod(name, path) - except KeyError: - return None - modpath = py.path.local(mod.__file__).dirpath() - values = [] - for relroot in relroots: - if not isinstance(relroot, py.path.local): - relroot = relroot.replace("/", py.path.local.sep) - relroot = modpath.join(relroot, abs=True) - values.append(relroot) - return values - - def _get_override_ini_value(self, name): - value = None - # override_ini is a list of "ini=value" options - # always use the last item if multiple values are set for same ini-name, - # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 - for ini_config in self._override_ini: - try: - key, user_ini_value = ini_config.split("=", 1) - except ValueError: - raise UsageError("-o/--override-ini expects option=value style.") - else: - if key == name: - value = user_ini_value - return value - - def getoption(self, name, default=notset, skip=False): - """ return command line option value. - - :arg name: name of the option. You may also specify - the literal ``--OPT`` option instead of the "dest" option name. - :arg default: default value if no option of that name exists. - :arg skip: if True raise pytest.skip if option does not exists - or has a None value. - """ - name = self._opt2dest.get(name, name) - try: - val = getattr(self.option, name) - if val is None and skip: - raise AttributeError(name) - return val - except AttributeError: - if default is not notset: - return default - if skip: - import pytest - - pytest.skip("no %r option found" % (name,)) - raise ValueError("no option named %r" % (name,)) - - def getvalue(self, name, path=None): - """ (deprecated, use getoption()) """ - return self.getoption(name) - - def getvalueorskip(self, name, path=None): - """ (deprecated, use getoption(skip=True)) """ - return self.getoption(name, skip=True) - - -def _assertion_supported(): - try: - assert False - except AssertionError: - return True - else: - return False - - -def _warn_about_missing_assertion(mode): - if not _assertion_supported(): - if mode == "plain": - sys.stderr.write( - "WARNING: ASSERTIONS ARE NOT EXECUTED" - " and FAILING TESTS WILL PASS. Are you" - " using python -O?" 
- ) - else: - sys.stderr.write( - "WARNING: assertions not in test modules or" - " plugins will be ignored" - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n" - ) - - -def setns(obj, dic): - import pytest - - for name, value in dic.items(): - if isinstance(value, dict): - mod = getattr(obj, name, None) - if mod is None: - modname = "pytest.%s" % name - mod = types.ModuleType(modname) - sys.modules[modname] = mod - mod.__all__ = [] - setattr(obj, name, mod) - obj.__all__.append(name) - setns(mod, value) - else: - setattr(obj, name, value) - obj.__all__.append(name) - # if obj != pytest: - # pytest.__all__.append(name) - setattr(pytest, name, value) - - -def create_terminal_writer(config, *args, **kwargs): - """Create a TerminalWriter instance configured according to the options - in the config object. Every code which requires a TerminalWriter object - and has access to a config object should use this function. - """ - tw = py.io.TerminalWriter(*args, **kwargs) - if config.option.color == "yes": - tw.hasmarkup = True - if config.option.color == "no": - tw.hasmarkup = False - return tw - - -def _strtobool(val): - """Convert a string representation of truth to true (1) or false (0). - - True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values - are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if - 'val' is anything else. - - .. note:: copied from distutils.util - """ - val = val.lower() - if val in ("y", "yes", "t", "true", "on", "1"): - return 1 - elif val in ("n", "no", "f", "false", "off", "0"): - return 0 - else: - raise ValueError("invalid truth value %r" % (val,)) diff --git a/contrib/python/pytest/py2/_pytest/config/argparsing.py b/contrib/python/pytest/py2/_pytest/config/argparsing.py deleted file mode 100644 index 37fb772db9..0000000000 --- a/contrib/python/pytest/py2/_pytest/config/argparsing.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -import argparse -import warnings - -import py -import six - -from _pytest.config.exceptions import UsageError - -FILE_OR_DIR = "file_or_dir" - - -class Parser(object): - """ Parser for command line arguments and ini-file values. - - :ivar extra_info: dict of generic param -> value to display in case - there's an error processing the command line arguments. - """ - - prog = None - - def __init__(self, usage=None, processopt=None): - self._anonymous = OptionGroup("custom options", parser=self) - self._groups = [] - self._processopt = processopt - self._usage = usage - self._inidict = {} - self._ininames = [] - self.extra_info = {} - - def processoption(self, option): - if self._processopt: - if option.dest: - self._processopt(option) - - def getgroup(self, name, description="", after=None): - """ get (or create) a named option Group. - - :name: name of the option group. - :description: long description for --help output. - :after: name of other group, used for ordering --help output. - - The returned group object has an ``addoption`` method with the same - signature as :py:func:`parser.addoption - <_pytest.config.Parser.addoption>` but will be shown in the - respective group in the output of ``pytest. --help``. 
- """ - for group in self._groups: - if group.name == name: - return group - group = OptionGroup(name, description, parser=self) - i = 0 - for i, grp in enumerate(self._groups): - if grp.name == after: - break - self._groups.insert(i + 1, group) - return group - - def addoption(self, *opts, **attrs): - """ register a command line option. - - :opts: option names, can be short or long options. - :attrs: same attributes which the ``add_option()`` function of the - `argparse library - <http://docs.python.org/2/library/argparse.html>`_ - accepts. - - After command line parsing options are available on the pytest config - object via ``config.option.NAME`` where ``NAME`` is usually set - by passing a ``dest`` attribute, for example - ``addoption("--long", dest="NAME", ...)``. - """ - self._anonymous.addoption(*opts, **attrs) - - def parse(self, args, namespace=None): - from _pytest._argcomplete import try_argcomplete - - self.optparser = self._getparser() - try_argcomplete(self.optparser) - args = [str(x) if isinstance(x, py.path.local) else x for x in args] - return self.optparser.parse_args(args, namespace=namespace) - - def _getparser(self): - from _pytest._argcomplete import filescompleter - - optparser = MyOptionParser(self, self.extra_info, prog=self.prog) - groups = self._groups + [self._anonymous] - for group in groups: - if group.options: - desc = group.description or group.name - arggroup = optparser.add_argument_group(desc) - for option in group.options: - n = option.names() - a = option.attrs() - arggroup.add_argument(*n, **a) - # bash like autocompletion for dirs (appending '/') - optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter - return optparser - - def parse_setoption(self, args, option, namespace=None): - parsedoption = self.parse(args, namespace=namespace) - for name, value in parsedoption.__dict__.items(): - setattr(option, name, value) - return getattr(parsedoption, FILE_OR_DIR) - - def parse_known_args(self, args, namespace=None): - """parses and returns a namespace object with known arguments at this - point. - """ - return self.parse_known_and_unknown_args(args, namespace=namespace)[0] - - def parse_known_and_unknown_args(self, args, namespace=None): - """parses and returns a namespace object with known arguments, and - the remaining arguments unknown at this point. - """ - optparser = self._getparser() - args = [str(x) if isinstance(x, py.path.local) else x for x in args] - return optparser.parse_known_args(args, namespace=namespace) - - def addini(self, name, help, type=None, default=None): - """ register an ini-file option. - - :name: name of the ini-variable - :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` - or ``bool``. - :default: default value if no ini-file option exists but is queried. - - The value of ini-variables can be retrieved via a call to - :py:func:`config.getini(name) <_pytest.config.Config.getini>`. - """ - assert type in (None, "pathlist", "args", "linelist", "bool") - self._inidict[name] = (help, type, default) - self._ininames.append(name) - - -class ArgumentError(Exception): - """ - Raised if an Argument instance is created with invalid or - inconsistent arguments. 
- """ - - def __init__(self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__(self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - - -class Argument(object): - """class that mimics the necessary behaviour of optparse.Option - - it's currently a least effort implementation - and ignoring choices and integer prefixes - https://docs.python.org/3/library/optparse.html#optparse-standard-option-types - """ - - _typ_map = {"int": int, "string": str, "float": float, "complex": complex} - - def __init__(self, *names, **attrs): - """store parms in private vars for use in add_argument""" - self._attrs = attrs - self._short_opts = [] - self._long_opts = [] - self.dest = attrs.get("dest") - if "%default" in (attrs.get("help") or ""): - warnings.warn( - 'pytest now uses argparse. "%default" should be' - ' changed to "%(default)s" ', - DeprecationWarning, - stacklevel=3, - ) - try: - typ = attrs["type"] - except KeyError: - pass - else: - # this might raise a keyerror as well, don't want to catch that - if isinstance(typ, six.string_types): - if typ == "choice": - warnings.warn( - "`type` argument to addoption() is the string %r." - " For choices this is optional and can be omitted, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, - stacklevel=4, - ) - # argparse expects a type here take it from - # the type of the first element - attrs["type"] = type(attrs["choices"][0]) - else: - warnings.warn( - "`type` argument to addoption() is the string %r, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, - stacklevel=4, - ) - attrs["type"] = Argument._typ_map[typ] - # used in test_parseopt -> test_parse_defaultgetter - self.type = attrs["type"] - else: - self.type = typ - try: - # attribute existence is tested in Config._processopt - self.default = attrs["default"] - except KeyError: - pass - self._set_opt_strings(names) - if not self.dest: - if self._long_opts: - self.dest = self._long_opts[0][2:].replace("-", "_") - else: - try: - self.dest = self._short_opts[0][1:] - except IndexError: - raise ArgumentError("need a long or short option", self) - - def names(self): - return self._short_opts + self._long_opts - - def attrs(self): - # update any attributes set by processopt - attrs = "default dest help".split() - if self.dest: - attrs.append(self.dest) - for attr in attrs: - try: - self._attrs[attr] = getattr(self, attr) - except AttributeError: - pass - if self._attrs.get("help"): - a = self._attrs["help"] - a = a.replace("%default", "%(default)s") - # a = a.replace('%prog', '%(prog)s') - self._attrs["help"] = a - return self._attrs - - def _set_opt_strings(self, opts): - """directly from optparse - - might not be necessary as this is passed to argparse later on""" - for opt in opts: - if len(opt) < 2: - raise ArgumentError( - "invalid option string %r: " - "must be at least two characters long" % opt, - self, - ) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise ArgumentError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self, - ) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise ArgumentError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self, - ) - self._long_opts.append(opt) - - def __repr__(self): - args 
= [] - if self._short_opts: - args += ["_short_opts: " + repr(self._short_opts)] - if self._long_opts: - args += ["_long_opts: " + repr(self._long_opts)] - args += ["dest: " + repr(self.dest)] - if hasattr(self, "type"): - args += ["type: " + repr(self.type)] - if hasattr(self, "default"): - args += ["default: " + repr(self.default)] - return "Argument({})".format(", ".join(args)) - - -class OptionGroup(object): - def __init__(self, name, description="", parser=None): - self.name = name - self.description = description - self.options = [] - self.parser = parser - - def addoption(self, *optnames, **attrs): - """ add an option to this group. - - if a shortened version of a long option is specified it will - be suppressed in the help. addoption('--twowords', '--two-words') - results in help showing '--two-words' only, but --twowords gets - accepted **and** the automatic destination is in args.twowords - """ - conflict = set(optnames).intersection( - name for opt in self.options for name in opt.names() - ) - if conflict: - raise ValueError("option names %s already added" % conflict) - option = Argument(*optnames, **attrs) - self._addoption_instance(option, shortupper=False) - - def _addoption(self, *optnames, **attrs): - option = Argument(*optnames, **attrs) - self._addoption_instance(option, shortupper=True) - - def _addoption_instance(self, option, shortupper=False): - if not shortupper: - for opt in option._short_opts: - if opt[0] == "-" and opt[1].islower(): - raise ValueError("lowercase shortoptions reserved") - if self.parser: - self.parser.processoption(option) - self.options.append(option) - - -class MyOptionParser(argparse.ArgumentParser): - def __init__(self, parser, extra_info=None, prog=None): - if not extra_info: - extra_info = {} - self._parser = parser - argparse.ArgumentParser.__init__( - self, - prog=prog, - usage=parser._usage, - add_help=False, - formatter_class=DropShorterLongHelpFormatter, - ) - # extra_info is a dict of (param -> value) to display if there's - # an usage error to provide more contextual information to the user - self.extra_info = extra_info - - def error(self, message): - """Transform argparse error message into UsageError.""" - msg = "%s: error: %s" % (self.prog, message) - - if hasattr(self._parser, "_config_source_hint"): - msg = "%s (%s)" % (msg, self._parser._config_source_hint) - - raise UsageError(self.format_usage() + msg) - - def parse_args(self, args=None, namespace=None): - """allow splitting of positional arguments""" - args, argv = self.parse_known_args(args, namespace) - if argv: - for arg in argv: - if arg and arg[0] == "-": - lines = ["unrecognized arguments: %s" % (" ".join(argv))] - for k, v in sorted(self.extra_info.items()): - lines.append(" %s: %s" % (k, v)) - self.error("\n".join(lines)) - getattr(args, FILE_OR_DIR).extend(argv) - return args - - -class DropShorterLongHelpFormatter(argparse.HelpFormatter): - """shorten help for long options that differ only in extra hyphens - - - collapse **long** options that are the same except for extra hyphens - - special action attribute map_long_option allows surpressing additional - long options - - shortcut if there are only two options and one of them is a short one - - cache result on action object as this is called at least 2 times - """ - - def _format_action_invocation(self, action): - orgstr = argparse.HelpFormatter._format_action_invocation(self, action) - if orgstr and orgstr[0] != "-": # only optional arguments - return orgstr - res = getattr(action, "_formatted_action_invocation", 
None) - if res: - return res - options = orgstr.split(", ") - if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): - # a shortcut for '-h, --help' or '--abc', '-a' - action._formatted_action_invocation = orgstr - return orgstr - return_list = [] - option_map = getattr(action, "map_long_option", {}) - if option_map is None: - option_map = {} - short_long = {} - for option in options: - if len(option) == 2 or option[2] == " ": - continue - if not option.startswith("--"): - raise ArgumentError( - 'long optional argument without "--": [%s]' % (option), self - ) - xxoption = option[2:] - if xxoption.split()[0] not in option_map: - shortened = xxoption.replace("-", "") - if shortened not in short_long or len(short_long[shortened]) < len( - xxoption - ): - short_long[shortened] = xxoption - # now short_long has been filled out to the longest with dashes - # **and** we keep the right option ordering from add_argument - for option in options: - if len(option) == 2 or option[2] == " ": - return_list.append(option) - if option[2:] == short_long.get(option.replace("-", "")): - return_list.append(option.replace(" ", "=", 1)) - action._formatted_action_invocation = ", ".join(return_list) - return action._formatted_action_invocation diff --git a/contrib/python/pytest/py2/_pytest/config/exceptions.py b/contrib/python/pytest/py2/_pytest/config/exceptions.py deleted file mode 100644 index bf58fde5db..0000000000 --- a/contrib/python/pytest/py2/_pytest/config/exceptions.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- -class UsageError(Exception): - """ error in pytest usage or invocation""" - - -class PrintHelp(Exception): - """Raised when pytest should print it's help to skip the rest of the - argument parsing and validation.""" - - pass diff --git a/contrib/python/pytest/py2/_pytest/config/findpaths.py b/contrib/python/pytest/py2/_pytest/config/findpaths.py deleted file mode 100644 index e6779b289b..0000000000 --- a/contrib/python/pytest/py2/_pytest/config/findpaths.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import py - -from .exceptions import UsageError -from _pytest.outcomes import fail - - -def exists(path, ignore=EnvironmentError): - try: - return path.check() - except ignore: - return False - - -def getcfg(args, config=None): - """ - Search the list of arguments for a valid ini-file for pytest, - and return a tuple of (rootdir, inifile, cfg-dict). - - note: config is optional and used only to issue warnings explicitly (#2891). 
- """ - from _pytest.deprecated import CFG_PYTEST_SECTION - - inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] - args = [x for x in args if not str(x).startswith("-")] - if not args: - args = [py.path.local()] - for arg in args: - arg = py.path.local(arg) - for base in arg.parts(reverse=True): - for inibasename in inibasenames: - p = base.join(inibasename) - if exists(p): - try: - iniconfig = py.iniconfig.IniConfig(p) - except py.iniconfig.ParseError as exc: - raise UsageError(str(exc)) - - if ( - inibasename == "setup.cfg" - and "tool:pytest" in iniconfig.sections - ): - return base, p, iniconfig["tool:pytest"] - elif "pytest" in iniconfig.sections: - if inibasename == "setup.cfg" and config is not None: - - fail( - CFG_PYTEST_SECTION.format(filename=inibasename), - pytrace=False, - ) - return base, p, iniconfig["pytest"] - elif inibasename == "pytest.ini": - # allowed to be empty - return base, p, {} - return None, None, None - - -def get_common_ancestor(paths): - common_ancestor = None - for path in paths: - if not path.exists(): - continue - if common_ancestor is None: - common_ancestor = path - else: - if path.relto(common_ancestor) or path == common_ancestor: - continue - elif common_ancestor.relto(path): - common_ancestor = path - else: - shared = path.common(common_ancestor) - if shared is not None: - common_ancestor = shared - if common_ancestor is None: - common_ancestor = py.path.local() - elif common_ancestor.isfile(): - common_ancestor = common_ancestor.dirpath() - return common_ancestor - - -def get_dirs_from_args(args): - def is_option(x): - return str(x).startswith("-") - - def get_file_part_from_node_id(x): - return str(x).split("::")[0] - - def get_dir_from_path(path): - if path.isdir(): - return path - return py.path.local(path.dirname) - - # These look like paths but may not exist - possible_paths = ( - py.path.local(get_file_part_from_node_id(arg)) - for arg in args - if not is_option(arg) - ) - - return [get_dir_from_path(path) for path in possible_paths if path.exists()] - - -def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): - dirs = get_dirs_from_args(args) - if inifile: - iniconfig = py.iniconfig.IniConfig(inifile) - is_cfg_file = str(inifile).endswith(".cfg") - sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] - for section in sections: - try: - inicfg = iniconfig[section] - if is_cfg_file and section == "pytest" and config is not None: - from _pytest.deprecated import CFG_PYTEST_SECTION - - fail( - CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False - ) - break - except KeyError: - inicfg = None - if rootdir_cmd_arg is None: - rootdir = get_common_ancestor(dirs) - else: - ancestor = get_common_ancestor(dirs) - rootdir, inifile, inicfg = getcfg([ancestor], config=config) - if rootdir is None and rootdir_cmd_arg is None: - for possible_rootdir in ancestor.parts(reverse=True): - if possible_rootdir.join("setup.py").exists(): - rootdir = possible_rootdir - break - else: - if dirs != [ancestor]: - rootdir, inifile, inicfg = getcfg(dirs, config=config) - if rootdir is None: - if config is not None: - cwd = config.invocation_dir - else: - cwd = py.path.local() - rootdir = get_common_ancestor([cwd, ancestor]) - is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" - if is_fs_root: - rootdir = ancestor - if rootdir_cmd_arg: - rootdir = py.path.local(os.path.expandvars(rootdir_cmd_arg)) - if not rootdir.isdir(): - raise UsageError( - "Directory '{}' not found. 
Check your '--rootdir' option.".format( - rootdir - ) - ) - return rootdir, inifile, inicfg or {} diff --git a/contrib/python/pytest/py2/_pytest/debugging.py b/contrib/python/pytest/py2/_pytest/debugging.py deleted file mode 100644 index bc114c8683..0000000000 --- a/contrib/python/pytest/py2/_pytest/debugging.py +++ /dev/null @@ -1,373 +0,0 @@ -# -*- coding: utf-8 -*- -""" interactive debugging with PDB, the Python Debugger. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import argparse -import pdb -import sys -from doctest import UnexpectedException - -from _pytest import outcomes -from _pytest.config import hookimpl -from _pytest.config.exceptions import UsageError - - -def import_readline(): - try: - import readline - except ImportError: - sys.path.append('/usr/lib/python2.7/lib-dynload') - - try: - import readline - except ImportError as e: - print('can not import readline:', e) - - import subprocess - try: - subprocess.check_call('stty icrnl'.split()) - except OSError as e: - print('can not restore Enter, use Control+J:', e) - - -def tty(): - if os.isatty(1): - return - - fd = os.open('/dev/tty', os.O_RDWR) - os.dup2(fd, 0) - os.dup2(fd, 1) - os.dup2(fd, 2) - os.close(fd) - - old_sys_path = sys.path - sys.path = list(sys.path) - try: - import_readline() - finally: - sys.path = old_sys_path - - -def _validate_usepdb_cls(value): - """Validate syntax of --pdbcls option.""" - try: - modname, classname = value.split(":") - except ValueError: - raise argparse.ArgumentTypeError( - "{!r} is not in the format 'modname:classname'".format(value) - ) - return (modname, classname) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "--pdb", - dest="usepdb", - action="store_true", - help="start the interactive Python debugger on errors or KeyboardInterrupt.", - ) - group._addoption( - "--pdbcls", - dest="usepdb_cls", - metavar="modulename:classname", - type=_validate_usepdb_cls, - help="start a custom interactive Python debugger on errors. " - "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", - ) - group._addoption( - "--trace", - dest="trace", - action="store_true", - help="Immediately break when running each test.", - ) - - -def pytest_configure(config): - if config.getvalue("trace"): - config.pluginmanager.register(PdbTrace(), "pdbtrace") - if config.getvalue("usepdb"): - config.pluginmanager.register(PdbInvoke(), "pdbinvoke") - - pytestPDB._saved.append( - (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) - ) - pdb.set_trace = pytestPDB.set_trace - pytestPDB._pluginmanager = config.pluginmanager - pytestPDB._config = config - - # NOTE: not using pytest_unconfigure, since it might get called although - # pytest_configure was not (if another plugin raises UsageError). - def fin(): - ( - pdb.set_trace, - pytestPDB._pluginmanager, - pytestPDB._config, - ) = pytestPDB._saved.pop() - - config._cleanup.append(fin) - - -class pytestPDB(object): - """ Pseudo PDB that defers to the real pdb. """ - - _pluginmanager = None - _config = None - _saved = [] - _recursive_debug = 0 - _wrapped_pdb_cls = None - - @classmethod - def _is_capturing(cls, capman): - if capman: - return capman.is_capturing() - return False - - @classmethod - def _import_pdb_cls(cls, capman): - if not cls._config: - # Happens when using pytest.set_trace outside of a test. 
- return pdb.Pdb - - usepdb_cls = cls._config.getvalue("usepdb_cls") - - if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: - return cls._wrapped_pdb_cls[1] - - if usepdb_cls: - modname, classname = usepdb_cls - - try: - __import__(modname) - mod = sys.modules[modname] - - # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). - parts = classname.split(".") - pdb_cls = getattr(mod, parts[0]) - for part in parts[1:]: - pdb_cls = getattr(pdb_cls, part) - except Exception as exc: - value = ":".join((modname, classname)) - raise UsageError( - "--pdbcls: could not import {!r}: {}".format(value, exc) - ) - else: - pdb_cls = pdb.Pdb - - wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) - cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) - return wrapped_cls - - @classmethod - def _get_pdb_wrapper_class(cls, pdb_cls, capman): - import _pytest.config - - class PytestPdbWrapper(pdb_cls, object): - _pytest_capman = capman - _continued = False - - def do_debug(self, arg): - cls._recursive_debug += 1 - ret = super(PytestPdbWrapper, self).do_debug(arg) - cls._recursive_debug -= 1 - return ret - - def do_continue(self, arg): - ret = super(PytestPdbWrapper, self).do_continue(arg) - if cls._recursive_debug == 0: - tw = _pytest.config.create_terminal_writer(cls._config) - tw.line() - - capman = self._pytest_capman - capturing = pytestPDB._is_capturing(capman) - if capturing: - if capturing == "global": - tw.sep(">", "PDB continue (IO-capturing resumed)") - else: - tw.sep( - ">", - "PDB continue (IO-capturing resumed for %s)" - % capturing, - ) - capman.resume() - else: - tw.sep(">", "PDB continue") - cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) - self._continued = True - return ret - - do_c = do_cont = do_continue - - def do_quit(self, arg): - """Raise Exit outcome when quit command is used in pdb. - - This is a bit of a hack - it would be better if BdbQuit - could be handled, but this would require to wrap the - whole pytest run, and adjust the report etc. - """ - ret = super(PytestPdbWrapper, self).do_quit(arg) - - if cls._recursive_debug == 0: - outcomes.exit("Quitting debugger") - - return ret - - do_q = do_quit - do_exit = do_quit - - def setup(self, f, tb): - """Suspend on setup(). - - Needed after do_continue resumed, and entering another - breakpoint again. - """ - ret = super(PytestPdbWrapper, self).setup(f, tb) - if not ret and self._continued: - # pdb.setup() returns True if the command wants to exit - # from the interaction: do not suspend capturing then. - if self._pytest_capman: - self._pytest_capman.suspend_global_capture(in_=True) - return ret - - def get_stack(self, f, t): - stack, i = super(PytestPdbWrapper, self).get_stack(f, t) - if f is None: - # Find last non-hidden frame. - i = max(0, len(stack) - 1) - while i and stack[i][0].f_locals.get("__tracebackhide__", False): - i -= 1 - return stack, i - - return PytestPdbWrapper - - @classmethod - def _init_pdb(cls, method, *args, **kwargs): - """ Initialize PDB debugging, dropping any IO capturing. """ - import _pytest.config - - if cls._pluginmanager is not None: - capman = cls._pluginmanager.getplugin("capturemanager") - else: - capman = None - if capman: - capman.suspend(in_=True) - - if cls._config: - tw = _pytest.config.create_terminal_writer(cls._config) - tw.line() - - if cls._recursive_debug == 0: - # Handle header similar to pdb.set_trace in py37+. 
- header = kwargs.pop("header", None) - if header is not None: - tw.sep(">", header) - else: - capturing = cls._is_capturing(capman) - if capturing == "global": - tw.sep(">", "PDB %s (IO-capturing turned off)" % (method,)) - elif capturing: - tw.sep( - ">", - "PDB %s (IO-capturing turned off for %s)" - % (method, capturing), - ) - else: - tw.sep(">", "PDB %s" % (method,)) - - _pdb = cls._import_pdb_cls(capman)(**kwargs) - - if cls._pluginmanager: - cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) - return _pdb - - @classmethod - def set_trace(cls, *args, **kwargs): - """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" - tty() - frame = sys._getframe().f_back - _pdb = cls._init_pdb("set_trace", *args, **kwargs) - _pdb.set_trace(frame) - - -class PdbInvoke(object): - def pytest_exception_interact(self, node, call, report): - capman = node.config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture(in_=True) - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stdout.write(err) - tty() - _enter_pdb(node, call.excinfo, report) - - def pytest_internalerror(self, excrepr, excinfo): - tb = _postmortem_traceback(excinfo) - post_mortem(tb) - - -class PdbTrace(object): - @hookimpl(hookwrapper=True) - def pytest_pyfunc_call(self, pyfuncitem): - _test_pytest_function(pyfuncitem) - yield - - -def _test_pytest_function(pyfuncitem): - _pdb = pytestPDB._init_pdb("runcall") - testfunction = pyfuncitem.obj - pyfuncitem.obj = _pdb.runcall - if "func" in pyfuncitem._fixtureinfo.argnames: # pragma: no branch - raise ValueError("--trace can't be used with a fixture named func!") - pyfuncitem.funcargs["func"] = testfunction - new_list = list(pyfuncitem._fixtureinfo.argnames) - new_list.append("func") - pyfuncitem._fixtureinfo.argnames = tuple(new_list) - - -def _enter_pdb(node, excinfo, rep): - # XXX we re-use the TerminalReporter's terminalwriter - # because this seems to avoid some encoding related troubles - # for not completely clear reasons. - tw = node.config.pluginmanager.getplugin("terminalreporter")._tw - tw.line() - - showcapture = node.config.option.showcapture - - for sectionname, content in ( - ("stdout", rep.capstdout), - ("stderr", rep.capstderr), - ("log", rep.caplog), - ): - if showcapture in (sectionname, "all") and content: - tw.sep(">", "captured " + sectionname) - if content[-1:] == "\n": - content = content[:-1] - tw.line(content) - - tw.sep(">", "traceback") - rep.toterminal(tw) - tw.sep(">", "entering PDB") - tb = _postmortem_traceback(excinfo) - rep._pdbshown = True - post_mortem(tb) - return rep - - -def _postmortem_traceback(excinfo): - if isinstance(excinfo.value, UnexpectedException): - # A doctest.UnexpectedException is not useful for post_mortem. - # Use the underlying exception instead: - return excinfo.value.exc_info[2] - else: - return excinfo._excinfo[2] - - -def post_mortem(t): - p = pytestPDB._init_pdb("post_mortem") - p.reset() - p.interaction(None, t) - if p.quitting: - outcomes.exit("Quitting debugger") diff --git a/contrib/python/pytest/py2/_pytest/deprecated.py b/contrib/python/pytest/py2/_pytest/deprecated.py deleted file mode 100644 index 12394aca3f..0000000000 --- a/contrib/python/pytest/py2/_pytest/deprecated.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module contains deprecation messages and bits of code used elsewhere in the codebase -that is planned to be removed in the next pytest release. 
- -Keeping it in a central location makes it easy to track what is deprecated and should -be removed when the time comes. - -All constants defined in this module should be either PytestWarning instances or UnformattedWarning -in case of warnings which need to format their messages. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from _pytest.warning_types import PytestDeprecationWarning -from _pytest.warning_types import RemovedInPytest4Warning -from _pytest.warning_types import UnformattedWarning - -YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" - - -FIXTURE_FUNCTION_CALL = ( - 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' - "but are created automatically when test functions request them as parameters.\n" - "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n" - "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code." -) - -FIXTURE_NAMED_REQUEST = PytestDeprecationWarning( - "'request' is a reserved name for fixtures and will raise an error in future versions" -) - -CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." - -GETFUNCARGVALUE = RemovedInPytest4Warning( - "getfuncargvalue is deprecated, use getfixturevalue" -) - -RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning( - "The 'message' parameter is deprecated.\n" - "(did you mean to use `match='some regex'` to check the exception message?)\n" - "Please see:\n" - " https://docs.pytest.org/en/4.6-maintenance/deprecations.html#message-parameter-of-pytest-raises" -) - -RESULT_LOG = PytestDeprecationWarning( - "--result-log is deprecated and scheduled for removal in pytest 5.0.\n" - "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information." -) - -RAISES_EXEC = PytestDeprecationWarning( - "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n" - "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" -) -WARNS_EXEC = PytestDeprecationWarning( - "warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n" - "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" -) - -PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = ( - "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported " - "because it affects the entire directory tree in a non-explicit way.\n" - " {}\n" - "Please move it to a top level conftest file at the rootdir:\n" - " {}\n" - "For more information, visit:\n" - " https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" -) - -PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning( - "the `pytest.config` global is deprecated. Please use `request.config` " - "or `pytest_configure` (if you're a pytest plugin) instead." 
-) - -PYTEST_ENSURETEMP = RemovedInPytest4Warning( - "pytest/tmpdir_factory.ensuretemp is deprecated, \n" - "please use the tmp_path fixture or tmp_path_factory.mktemp" -) - -PYTEST_LOGWARNING = PytestDeprecationWarning( - "pytest_logwarning is deprecated, no longer being called, and will be removed soon\n" - "please use pytest_warning_captured instead" -) - -PYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning( - PytestDeprecationWarning, - "pytest.warns() got unexpected keyword arguments: {args!r}.\n" - "This will be an error in future versions.", -) - -PYTEST_PARAM_UNKNOWN_KWARGS = UnformattedWarning( - PytestDeprecationWarning, - "pytest.param() got unexpected keyword arguments: {args!r}.\n" - "This will be an error in future versions.", -) diff --git a/contrib/python/pytest/py2/_pytest/doctest.py b/contrib/python/pytest/py2/_pytest/doctest.py deleted file mode 100644 index 659d24aeeb..0000000000 --- a/contrib/python/pytest/py2/_pytest/doctest.py +++ /dev/null @@ -1,583 +0,0 @@ -# -*- coding: utf-8 -*- -""" discover and run doctests in modules and test files.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import inspect -import platform -import sys -import traceback -import warnings -from contextlib import contextmanager - -import pytest -from _pytest._code.code import ExceptionInfo -from _pytest._code.code import ReprFileLocation -from _pytest._code.code import TerminalRepr -from _pytest.compat import safe_getattr -from _pytest.fixtures import FixtureRequest -from _pytest.outcomes import Skipped -from _pytest.warning_types import PytestWarning - -DOCTEST_REPORT_CHOICE_NONE = "none" -DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" -DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" -DOCTEST_REPORT_CHOICE_UDIFF = "udiff" -DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" - -DOCTEST_REPORT_CHOICES = ( - DOCTEST_REPORT_CHOICE_NONE, - DOCTEST_REPORT_CHOICE_CDIFF, - DOCTEST_REPORT_CHOICE_NDIFF, - DOCTEST_REPORT_CHOICE_UDIFF, - DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, -) - -# Lazy definition of runner class -RUNNER_CLASS = None - - -def pytest_addoption(parser): - parser.addini( - "doctest_optionflags", - "option flags for doctests", - type="args", - default=["ELLIPSIS"], - ) - parser.addini( - "doctest_encoding", "encoding used for doctest files", default="utf-8" - ) - group = parser.getgroup("collect") - group.addoption( - "--doctest-modules", - action="store_true", - default=False, - help="run doctests in all .py modules", - dest="doctestmodules", - ) - group.addoption( - "--doctest-report", - type=str.lower, - default="udiff", - help="choose another output format for diffs on doctest failure", - choices=DOCTEST_REPORT_CHOICES, - dest="doctestreport", - ) - group.addoption( - "--doctest-glob", - action="append", - default=[], - metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob", - ) - group.addoption( - "--doctest-ignore-import-errors", - action="store_true", - default=False, - help="ignore doctest ImportErrors", - dest="doctest_ignore_import_errors", - ) - group.addoption( - "--doctest-continue-on-failure", - action="store_true", - default=False, - help="for a given doctest, continue to run after the first failure", - dest="doctest_continue_on_failure", - ) - - -def pytest_collect_file(path, parent): - config = parent.config - if path.ext == ".py": - if config.option.doctestmodules and not _is_setup_py(config, path, parent): - return DoctestModule(path, parent) - elif 
_is_doctest(config, path, parent): - return DoctestTextfile(path, parent) - - -def _is_setup_py(config, path, parent): - if path.basename != "setup.py": - return False - contents = path.read() - return "setuptools" in contents or "distutils" in contents - - -def _is_doctest(config, path, parent): - if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): - return True - globs = config.getoption("doctestglob") or ["test*.txt"] - for glob in globs: - if path.check(fnmatch=glob): - return True - return False - - -class ReprFailDoctest(TerminalRepr): - def __init__(self, reprlocation_lines): - # List of (reprlocation, lines) tuples - self.reprlocation_lines = reprlocation_lines - - def toterminal(self, tw): - for reprlocation, lines in self.reprlocation_lines: - for line in lines: - tw.line(line) - reprlocation.toterminal(tw) - - -class MultipleDoctestFailures(Exception): - def __init__(self, failures): - super(MultipleDoctestFailures, self).__init__() - self.failures = failures - - -def _init_runner_class(): - import doctest - - class PytestDoctestRunner(doctest.DebugRunner): - """ - Runner to collect failures. Note that the out variable in this case is - a list instead of a stdout-like object - """ - - def __init__( - self, checker=None, verbose=None, optionflags=0, continue_on_failure=True - ): - doctest.DebugRunner.__init__( - self, checker=checker, verbose=verbose, optionflags=optionflags - ) - self.continue_on_failure = continue_on_failure - - def report_failure(self, out, test, example, got): - failure = doctest.DocTestFailure(test, example, got) - if self.continue_on_failure: - out.append(failure) - else: - raise failure - - def report_unexpected_exception(self, out, test, example, exc_info): - if isinstance(exc_info[1], Skipped): - raise exc_info[1] - failure = doctest.UnexpectedException(test, example, exc_info) - if self.continue_on_failure: - out.append(failure) - else: - raise failure - - return PytestDoctestRunner - - -def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): - # We need this in order to do a lazy import on doctest - global RUNNER_CLASS - if RUNNER_CLASS is None: - RUNNER_CLASS = _init_runner_class() - return RUNNER_CLASS( - checker=checker, - verbose=verbose, - optionflags=optionflags, - continue_on_failure=continue_on_failure, - ) - - -class DoctestItem(pytest.Item): - def __init__(self, name, parent, runner=None, dtest=None): - super(DoctestItem, self).__init__(name, parent) - self.runner = runner - self.dtest = dtest - self.obj = None - self.fixture_request = None - - def setup(self): - if self.dtest is not None: - self.fixture_request = _setup_fixtures(self) - globs = dict(getfixture=self.fixture_request.getfixturevalue) - for name, value in self.fixture_request.getfixturevalue( - "doctest_namespace" - ).items(): - globs[name] = value - self.dtest.globs.update(globs) - - def runtest(self): - _check_all_skipped(self.dtest) - self._disable_output_capturing_for_darwin() - failures = [] - self.runner.run(self.dtest, out=failures) - if failures: - raise MultipleDoctestFailures(failures) - - def _disable_output_capturing_for_darwin(self): - """ - Disable output capturing. 
Otherwise, stdout is lost to doctest (#985) - """ - if platform.system() != "Darwin": - return - capman = self.config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture(in_=True) - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - def repr_failure(self, excinfo): - import doctest - - failures = None - if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): - failures = [excinfo.value] - elif excinfo.errisinstance(MultipleDoctestFailures): - failures = excinfo.value.failures - - if failures is not None: - reprlocation_lines = [] - for failure in failures: - example = failure.example - test = failure.test - filename = test.filename - if test.lineno is None: - lineno = None - else: - lineno = test.lineno + example.lineno + 1 - message = type(failure).__name__ - reprlocation = ReprFileLocation(filename, lineno, message) - checker = _get_checker() - report_choice = _get_report_choice( - self.config.getoption("doctestreport") - ) - if lineno is not None: - lines = failure.test.docstring.splitlines(False) - # add line numbers to the left of the error message - lines = [ - "%03d %s" % (i + test.lineno + 1, x) - for (i, x) in enumerate(lines) - ] - # trim docstring error lines to 10 - lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] - else: - lines = [ - "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" - ] - indent = ">>>" - for line in example.source.splitlines(): - lines.append("??? %s %s" % (indent, line)) - indent = "..." - if isinstance(failure, doctest.DocTestFailure): - lines += checker.output_difference( - example, failure.got, report_choice - ).split("\n") - else: - inner_excinfo = ExceptionInfo(failure.exc_info) - lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] - lines += traceback.format_exception(*failure.exc_info) - reprlocation_lines.append((reprlocation, lines)) - return ReprFailDoctest(reprlocation_lines) - else: - return super(DoctestItem, self).repr_failure(excinfo) - - def reportinfo(self): - return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name - - -def _get_flag_lookup(): - import doctest - - return dict( - DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, - DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, - NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, - ELLIPSIS=doctest.ELLIPSIS, - IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, - COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, - ALLOW_UNICODE=_get_allow_unicode_flag(), - ALLOW_BYTES=_get_allow_bytes_flag(), - ) - - -def get_optionflags(parent): - optionflags_str = parent.config.getini("doctest_optionflags") - flag_lookup_table = _get_flag_lookup() - flag_acc = 0 - for flag in optionflags_str: - flag_acc |= flag_lookup_table[flag] - return flag_acc - - -def _get_continue_on_failure(config): - continue_on_failure = config.getvalue("doctest_continue_on_failure") - if continue_on_failure: - # We need to turn off this if we use pdb since we should stop at - # the first failure - if config.getvalue("usepdb"): - continue_on_failure = False - return continue_on_failure - - -class DoctestTextfile(pytest.Module): - obj = None - - def collect(self): - import doctest - - # inspired by doctest.testfile; ideally we would use it directly, - # but it doesn't support passing a custom checker - encoding = self.config.getini("doctest_encoding") - text = self.fspath.read_text(encoding) - filename = str(self.fspath) - name = self.fspath.basename - globs = 
{"__name__": "__main__"} - - optionflags = get_optionflags(self) - - runner = _get_runner( - verbose=0, - optionflags=optionflags, - checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config), - ) - _fix_spoof_python2(runner, encoding) - - parser = doctest.DocTestParser() - test = parser.get_doctest(text, globs, name, filename, 0) - if test.examples: - yield DoctestItem(test.name, self, runner, test) - - -def _check_all_skipped(test): - """raises pytest.skip() if all examples in the given DocTest have the SKIP - option set. - """ - import doctest - - all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) - if all_skipped: - pytest.skip("all tests skipped by +SKIP option") - - -def _is_mocked(obj): - """ - returns if a object is possibly a mock object by checking the existence of a highly improbable attribute - """ - return ( - safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) - is not None - ) - - -@contextmanager -def _patch_unwrap_mock_aware(): - """ - contextmanager which replaces ``inspect.unwrap`` with a version - that's aware of mock objects and doesn't recurse on them - """ - real_unwrap = getattr(inspect, "unwrap", None) - if real_unwrap is None: - yield - else: - - def _mock_aware_unwrap(obj, stop=None): - try: - if stop is None or stop is _is_mocked: - return real_unwrap(obj, stop=_is_mocked) - return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj)) - except Exception as e: - warnings.warn( - "Got %r when unwrapping %r. This is usually caused " - "by a violation of Python's object protocol; see e.g. " - "https://github.com/pytest-dev/pytest/issues/5080" % (e, obj), - PytestWarning, - ) - raise - - inspect.unwrap = _mock_aware_unwrap - try: - yield - finally: - inspect.unwrap = real_unwrap - - -class DoctestModule(pytest.Module): - def collect(self): - import doctest - - class MockAwareDocTestFinder(doctest.DocTestFinder): - """ - a hackish doctest finder that overrides stdlib internals to fix a stdlib bug - - https://github.com/pytest-dev/pytest/issues/3456 - https://bugs.python.org/issue25532 - """ - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - if _is_mocked(obj): - return - with _patch_unwrap_mock_aware(): - - doctest.DocTestFinder._find( - self, tests, obj, name, module, source_lines, globs, seen - ) - - if self.fspath.basename == "conftest.py": - module = self.config.pluginmanager._importconftest(self.fspath) - else: - try: - module = self.fspath.pyimport() - except ImportError: - if self.config.getvalue("doctest_ignore_import_errors"): - pytest.skip("unable to import module %r" % self.fspath) - else: - raise - # uses internal doctest module parsing mechanism - finder = MockAwareDocTestFinder() - optionflags = get_optionflags(self) - runner = _get_runner( - verbose=0, - optionflags=optionflags, - checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config), - ) - - for test in finder.find(module, module.__name__): - if test.examples: # skip empty doctests - yield DoctestItem(test.name, self, runner, test) - - -def _setup_fixtures(doctest_item): - """ - Used by DoctestTextfile and DoctestItem to setup fixture information. 
- """ - - def func(): - pass - - doctest_item.funcargs = {} - fm = doctest_item.session._fixturemanager - doctest_item._fixtureinfo = fm.getfixtureinfo( - node=doctest_item, func=func, cls=None, funcargs=False - ) - fixture_request = FixtureRequest(doctest_item) - fixture_request._fillfixtures() - return fixture_request - - -def _get_checker(): - """ - Returns a doctest.OutputChecker subclass that takes in account the - ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES - to strip b'' prefixes. - Useful when the same doctest should run in Python 2 and Python 3. - - An inner class is used to avoid importing "doctest" at the module - level. - """ - if hasattr(_get_checker, "LiteralsOutputChecker"): - return _get_checker.LiteralsOutputChecker() - - import doctest - import re - - class LiteralsOutputChecker(doctest.OutputChecker): - """ - Copied from doctest_nose_plugin.py from the nltk project: - https://github.com/nltk/nltk - - Further extended to also support byte literals. - """ - - _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) - _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) - - def check_output(self, want, got, optionflags): - res = doctest.OutputChecker.check_output(self, want, got, optionflags) - if res: - return True - - allow_unicode = optionflags & _get_allow_unicode_flag() - allow_bytes = optionflags & _get_allow_bytes_flag() - if not allow_unicode and not allow_bytes: - return False - - else: # pragma: no cover - - def remove_prefixes(regex, txt): - return re.sub(regex, r"\1\2", txt) - - if allow_unicode: - want = remove_prefixes(self._unicode_literal_re, want) - got = remove_prefixes(self._unicode_literal_re, got) - if allow_bytes: - want = remove_prefixes(self._bytes_literal_re, want) - got = remove_prefixes(self._bytes_literal_re, got) - res = doctest.OutputChecker.check_output(self, want, got, optionflags) - return res - - _get_checker.LiteralsOutputChecker = LiteralsOutputChecker - return _get_checker.LiteralsOutputChecker() - - -def _get_allow_unicode_flag(): - """ - Registers and returns the ALLOW_UNICODE flag. - """ - import doctest - - return doctest.register_optionflag("ALLOW_UNICODE") - - -def _get_allow_bytes_flag(): - """ - Registers and returns the ALLOW_BYTES flag. - """ - import doctest - - return doctest.register_optionflag("ALLOW_BYTES") - - -def _get_report_choice(key): - """ - This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid - importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. - """ - import doctest - - return { - DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, - DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, - DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, - DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, - DOCTEST_REPORT_CHOICE_NONE: 0, - }[key] - - -def _fix_spoof_python2(runner, encoding): - """ - Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This - should patch only doctests for text files because they don't have a way to declare their - encoding. Doctests in docstrings from Python modules don't have the same problem given that - Python already decoded the strings. - - This fixes the problem related in issue #2434. 
- """ - from _pytest.compat import _PY2 - - if not _PY2: - return - - from doctest import _SpoofOut - - class UnicodeSpoof(_SpoofOut): - def getvalue(self): - result = _SpoofOut.getvalue(self) - if encoding and isinstance(result, bytes): - result = result.decode(encoding) - return result - - runner._fakeout = UnicodeSpoof() - - -@pytest.fixture(scope="session") -def doctest_namespace(): - """ - Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. - """ - return dict() diff --git a/contrib/python/pytest/py2/_pytest/fixtures.py b/contrib/python/pytest/py2/_pytest/fixtures.py deleted file mode 100644 index 280a48608b..0000000000 --- a/contrib/python/pytest/py2/_pytest/fixtures.py +++ /dev/null @@ -1,1356 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import inspect -import itertools -import sys -import warnings -from collections import defaultdict -from collections import deque -from collections import OrderedDict - -import attr -import py -import six - -import _pytest -from _pytest import nodes -from _pytest._code.code import FormattedExcinfo -from _pytest._code.code import TerminalRepr -from _pytest.compat import _format_args -from _pytest.compat import _PytestWrapper -from _pytest.compat import exc_clear -from _pytest.compat import FuncargnamesCompatAttr -from _pytest.compat import get_real_func -from _pytest.compat import get_real_method -from _pytest.compat import getfslineno -from _pytest.compat import getfuncargnames -from _pytest.compat import getimfunc -from _pytest.compat import getlocation -from _pytest.compat import is_generator -from _pytest.compat import isclass -from _pytest.compat import NOTSET -from _pytest.compat import safe_getattr -from _pytest.deprecated import FIXTURE_FUNCTION_CALL -from _pytest.deprecated import FIXTURE_NAMED_REQUEST -from _pytest.outcomes import fail -from _pytest.outcomes import TEST_OUTCOME - - -@attr.s(frozen=True) -class PseudoFixtureDef(object): - cached_result = attr.ib() - scope = attr.ib() - - -def pytest_sessionstart(session): - import _pytest.python - import _pytest.nodes - - scopename2class.update( - { - "package": _pytest.python.Package, - "class": _pytest.python.Class, - "module": _pytest.python.Module, - "function": _pytest.nodes.Item, - "session": _pytest.main.Session, - } - ) - session._fixturemanager = FixtureManager(session) - - -scopename2class = {} - - -scope2props = dict(session=()) -scope2props["package"] = ("fspath",) -scope2props["module"] = ("fspath", "module") -scope2props["class"] = scope2props["module"] + ("cls",) -scope2props["instance"] = scope2props["class"] + ("instance",) -scope2props["function"] = scope2props["instance"] + ("function", "keywords") - - -def scopeproperty(name=None, doc=None): - def decoratescope(func): - scopename = name or func.__name__ - - def provide(self): - if func.__name__ in scope2props[self.scope]: - return func(self) - raise AttributeError( - "%s not available in %s-scoped context" % (scopename, self.scope) - ) - - return property(provide, None, None, func.__doc__) - - return decoratescope - - -def get_scope_package(node, fixturedef): - import pytest - - cls = pytest.Package - current = node - fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py") - while current and ( - type(current) is not cls or fixture_package_name != current.nodeid - ): - current = current.parent - if current is None: - return node.session - return 
current - - -def get_scope_node(node, scope): - cls = scopename2class.get(scope) - if cls is None: - raise ValueError("unknown scope") - return node.getparent(cls) - - -def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): - # this function will transform all collected calls to a functions - # if they use direct funcargs (i.e. direct parametrization) - # because we want later test execution to be able to rely on - # an existing FixtureDef structure for all arguments. - # XXX we can probably avoid this algorithm if we modify CallSpec2 - # to directly care for creating the fixturedefs within its methods. - if not metafunc._calls[0].funcargs: - return # this function call does not have direct parametrization - # collect funcargs of all callspecs into a list of values - arg2params = {} - arg2scope = {} - for callspec in metafunc._calls: - for argname, argvalue in callspec.funcargs.items(): - assert argname not in callspec.params - callspec.params[argname] = argvalue - arg2params_list = arg2params.setdefault(argname, []) - callspec.indices[argname] = len(arg2params_list) - arg2params_list.append(argvalue) - if argname not in arg2scope: - scopenum = callspec._arg2scopenum.get(argname, scopenum_function) - arg2scope[argname] = scopes[scopenum] - callspec.funcargs.clear() - - # register artificial FixtureDef's so that later at test execution - # time we can rely on a proper FixtureDef to exist for fixture setup. - arg2fixturedefs = metafunc._arg2fixturedefs - for argname, valuelist in arg2params.items(): - # if we have a scope that is higher than function we need - # to make sure we only ever create an according fixturedef on - # a per-scope basis. We thus store and cache the fixturedef on the - # node related to the scope. - scope = arg2scope[argname] - node = None - if scope != "function": - node = get_scope_node(collector, scope) - if node is None: - assert scope == "class" and isinstance(collector, _pytest.python.Module) - # use module-level collector for class-scope (for now) - node = collector - if node and argname in node._name2pseudofixturedef: - arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] - else: - fixturedef = FixtureDef( - fixturemanager, - "", - argname, - get_direct_param_fixture_func, - arg2scope[argname], - valuelist, - False, - False, - ) - arg2fixturedefs[argname] = [fixturedef] - if node is not None: - node._name2pseudofixturedef[argname] = fixturedef - - -def getfixturemarker(obj): - """ return fixturemarker or None if it doesn't exist or raised - exceptions.""" - try: - return getattr(obj, "_pytestfixturefunction", None) - except TEST_OUTCOME: - # some objects raise errors like request (from flask import request) - # we don't expect them to be fixture functions - return None - - -def get_parametrized_fixture_keys(item, scopenum): - """ return list of keys for all parametrized arguments which match - the specified scope. """ - assert scopenum < scopenum_function # function - try: - cs = item.callspec - except AttributeError: - pass - else: - # cs.indices.items() is random order of argnames. Need to - # sort this so that different calls to - # get_parametrized_fixture_keys will be deterministic. 
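- # The keys yielded below pair the argname and parameter index with
- # progressively narrower location info: nothing extra for session scope,
- # the directory for package scope, the file for module scope, and the
- # file plus class for class scope.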
- for argname, param_index in sorted(cs.indices.items()): - if cs._arg2scopenum[argname] != scopenum: - continue - if scopenum == 0: # session - key = (argname, param_index) - elif scopenum == 1: # package - key = (argname, param_index, item.fspath.dirpath()) - elif scopenum == 2: # module - key = (argname, param_index, item.fspath) - elif scopenum == 3: # class - key = (argname, param_index, item.fspath, item.cls) - yield key - - -# algorithm for sorting on a per-parametrized resource setup basis -# it is called for scopenum==0 (session) first and performs sorting -# down to the lower scopes such as to minimize number of "high scope" -# setups and teardowns - - -def reorder_items(items): - argkeys_cache = {} - items_by_argkey = {} - for scopenum in range(0, scopenum_function): - argkeys_cache[scopenum] = d = {} - items_by_argkey[scopenum] = item_d = defaultdict(deque) - for item in items: - keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) - if keys: - d[item] = keys - for key in keys: - item_d[key].append(item) - items = OrderedDict.fromkeys(items) - return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0)) - - -def fix_cache_order(item, argkeys_cache, items_by_argkey): - for scopenum in range(0, scopenum_function): - for key in argkeys_cache[scopenum].get(item, []): - items_by_argkey[scopenum][key].appendleft(item) - - -def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): - if scopenum >= scopenum_function or len(items) < 3: - return items - ignore = set() - items_deque = deque(items) - items_done = OrderedDict() - scoped_items_by_argkey = items_by_argkey[scopenum] - scoped_argkeys_cache = argkeys_cache[scopenum] - while items_deque: - no_argkey_group = OrderedDict() - slicing_argkey = None - while items_deque: - item = items_deque.popleft() - if item in items_done or item in no_argkey_group: - continue - argkeys = OrderedDict.fromkeys( - k for k in scoped_argkeys_cache.get(item, []) if k not in ignore - ) - if not argkeys: - no_argkey_group[item] = None - else: - slicing_argkey, _ = argkeys.popitem() - # we don't have to remove relevant items from later in the deque because they'll just be ignored - matching_items = [ - i for i in scoped_items_by_argkey[slicing_argkey] if i in items - ] - for i in reversed(matching_items): - fix_cache_order(i, argkeys_cache, items_by_argkey) - items_deque.appendleft(i) - break - if no_argkey_group: - no_argkey_group = reorder_items_atscope( - no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 - ) - for item in no_argkey_group: - items_done[item] = None - ignore.add(slicing_argkey) - return items_done - - -def fillfixtures(function): - """ fill missing funcargs for a test function. """ - try: - request = function._request - except AttributeError: - # XXX this special code path is only expected to execute - # with the oejskit plugin. It uses classes with funcargs - # and we thus have to work a bit to allow this. 
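- # Manually build the fixture info and request for this item, fill the
- # fixtures, and afterwards keep only the funcargs the function declares.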
- fm = function.session._fixturemanager - fi = fm.getfixtureinfo(function.parent, function.obj, None) - function._fixtureinfo = fi - request = function._request = FixtureRequest(function) - request._fillfixtures() - # prune out funcargs for jstests - newfuncargs = {} - for name in fi.argnames: - newfuncargs[name] = function.funcargs[name] - function.funcargs = newfuncargs - else: - request._fillfixtures() - - -def get_direct_param_fixture_func(request): - return request.param - - -@attr.s(slots=True) -class FuncFixtureInfo(object): - # original function argument names - argnames = attr.ib(type=tuple) - # argnames that function immediately requires. These include argnames + - # fixture names specified via usefixtures and via autouse=True in fixture - # definitions. - initialnames = attr.ib(type=tuple) - names_closure = attr.ib() # List[str] - name2fixturedefs = attr.ib() # List[str, List[FixtureDef]] - - def prune_dependency_tree(self): - """Recompute names_closure from initialnames and name2fixturedefs - - Can only reduce names_closure, which means that the new closure will - always be a subset of the old one. The order is preserved. - - This method is needed because direct parametrization may shadow some - of the fixtures that were included in the originally built dependency - tree. In this way the dependency tree can get pruned, and the closure - of argnames may get reduced. - """ - closure = set() - working_set = set(self.initialnames) - while working_set: - argname = working_set.pop() - # argname may be smth not included in the original names_closure, - # in which case we ignore it. This currently happens with pseudo - # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. - # So they introduce the new dependency 'request' which might have - # been missing in the original tree (closure). - if argname not in closure and argname in self.names_closure: - closure.add(argname) - if argname in self.name2fixturedefs: - working_set.update(self.name2fixturedefs[argname][-1].argnames) - - self.names_closure[:] = sorted(closure, key=self.names_closure.index) - - -class FixtureRequest(FuncargnamesCompatAttr): - """ A request for a fixture from a test or fixture function. - - A request object gives access to the requesting test context - and has an optional ``param`` attribute in case - the fixture is parametrized indirectly. 
- """ - - def __init__(self, pyfuncitem): - self._pyfuncitem = pyfuncitem - #: fixture for which this request is being performed - self.fixturename = None - #: Scope string, one of "function", "class", "module", "session" - self.scope = "function" - self._fixture_defs = {} # argname -> FixtureDef - fixtureinfo = pyfuncitem._fixtureinfo - self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() - self._arg2index = {} - self._fixturemanager = pyfuncitem.session._fixturemanager - - @property - def fixturenames(self): - """names of all active fixtures in this request""" - result = list(self._pyfuncitem._fixtureinfo.names_closure) - result.extend(set(self._fixture_defs).difference(result)) - return result - - @property - def node(self): - """ underlying collection node (depends on current request scope)""" - return self._getscopeitem(self.scope) - - def _getnextfixturedef(self, argname): - fixturedefs = self._arg2fixturedefs.get(argname, None) - if fixturedefs is None: - # we arrive here because of a dynamic call to - # getfixturevalue(argname) usage which was naturally - # not known at parsing/collection time - parentid = self._pyfuncitem.parent.nodeid - fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) - self._arg2fixturedefs[argname] = fixturedefs - # fixturedefs list is immutable so we maintain a decreasing index - index = self._arg2index.get(argname, 0) - 1 - if fixturedefs is None or (-index > len(fixturedefs)): - raise FixtureLookupError(argname, self) - self._arg2index[argname] = index - return fixturedefs[index] - - @property - def config(self): - """ the pytest config object associated with this request. """ - return self._pyfuncitem.config - - @scopeproperty() - def function(self): - """ test function object if the request has a per-function scope. """ - return self._pyfuncitem.obj - - @scopeproperty("class") - def cls(self): - """ class (can be None) where the test function was collected. """ - clscol = self._pyfuncitem.getparent(_pytest.python.Class) - if clscol: - return clscol.obj - - @property - def instance(self): - """ instance (can be None) on which test function was collected. """ - # unittest support hack, see _pytest.unittest.TestCaseFunction - try: - return self._pyfuncitem._testcase - except AttributeError: - function = getattr(self, "function", None) - return getattr(function, "__self__", None) - - @scopeproperty() - def module(self): - """ python module object where the test function was collected. """ - return self._pyfuncitem.getparent(_pytest.python.Module).obj - - @scopeproperty() - def fspath(self): - """ the file system path of the test module which collected this test. """ - return self._pyfuncitem.fspath - - @property - def keywords(self): - """ keywords/markers dictionary for the underlying node. """ - return self.node.keywords - - @property - def session(self): - """ pytest session object. """ - return self._pyfuncitem.session - - def addfinalizer(self, finalizer): - """ add finalizer/teardown function to be called after the - last test within the requesting test context finished - execution. """ - # XXX usually this method is shadowed by fixturedef specific ones - self._addfinalizer(finalizer, scope=self.scope) - - def _addfinalizer(self, finalizer, scope): - colitem = self._getscopeitem(scope) - self._pyfuncitem.session._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem - ) - - def applymarker(self, marker): - """ Apply a marker to a single test function invocation. 
- This method is useful if you don't want to have a keyword/marker - on all function invocations. - - :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object - created by a call to ``pytest.mark.NAME(...)``. - """ - self.node.add_marker(marker) - - def raiseerror(self, msg): - """ raise a FixtureLookupError with the given message. """ - raise self._fixturemanager.FixtureLookupError(None, self, msg) - - def _fillfixtures(self): - item = self._pyfuncitem - fixturenames = getattr(item, "fixturenames", self.fixturenames) - for argname in fixturenames: - if argname not in item.funcargs: - item.funcargs[argname] = self.getfixturevalue(argname) - - def getfixturevalue(self, argname): - """ Dynamically run a named fixture function. - - Declaring fixtures via function argument is recommended where possible. - But if you can only decide whether to use another fixture at test - setup time, you may use this function to retrieve it inside a fixture - or test function body. - """ - return self._get_active_fixturedef(argname).cached_result[0] - - def getfuncargvalue(self, argname): - """ Deprecated, use getfixturevalue. """ - from _pytest import deprecated - - warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) - return self.getfixturevalue(argname) - - def _get_active_fixturedef(self, argname): - try: - return self._fixture_defs[argname] - except KeyError: - try: - fixturedef = self._getnextfixturedef(argname) - except FixtureLookupError: - if argname == "request": - cached_result = (self, [0], None) - scope = "function" - return PseudoFixtureDef(cached_result, scope) - raise - # remove indent to prevent the python3 exception - # from leaking into the call - self._compute_fixture_value(fixturedef) - self._fixture_defs[argname] = fixturedef - return fixturedef - - def _get_fixturestack(self): - current = self - values = [] - while 1: - fixturedef = getattr(current, "_fixturedef", None) - if fixturedef is None: - values.reverse() - return values - values.append(fixturedef) - current = current._parent_request - - def _compute_fixture_value(self, fixturedef): - """ - Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will - force the FixtureDef object to throw away any previous results and compute a new fixture value, which - will be stored into the FixtureDef object itself. 
- - :param FixtureDef fixturedef: - """ - # prepare a subrequest object before calling fixture function - # (latter managed by fixturedef) - argname = fixturedef.argname - funcitem = self._pyfuncitem - scope = fixturedef.scope - try: - param = funcitem.callspec.getparam(argname) - except (AttributeError, ValueError): - param = NOTSET - param_index = 0 - has_params = fixturedef.params is not None - fixtures_not_supported = getattr(funcitem, "nofuncargs", False) - if has_params and fixtures_not_supported: - msg = ( - "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" - "Node id: {nodeid}\n" - "Function type: {typename}" - ).format( - name=funcitem.name, - nodeid=funcitem.nodeid, - typename=type(funcitem).__name__, - ) - fail(msg, pytrace=False) - if has_params: - frame = inspect.stack()[3] - frameinfo = inspect.getframeinfo(frame[0]) - source_path = frameinfo.filename - source_lineno = frameinfo.lineno - source_path = py.path.local(source_path) - if source_path.relto(funcitem.config.rootdir): - source_path = source_path.relto(funcitem.config.rootdir) - msg = ( - "The requested fixture has no parameter defined for test:\n" - " {}\n\n" - "Requested fixture '{}' defined in:\n{}" - "\n\nRequested here:\n{}:{}".format( - funcitem.nodeid, - fixturedef.argname, - getlocation(fixturedef.func, funcitem.config.rootdir), - source_path, - source_lineno, - ) - ) - fail(msg, pytrace=False) - else: - param_index = funcitem.callspec.indices[argname] - # if a parametrize invocation set a scope it will override - # the static scope defined with the fixture function - paramscopenum = funcitem.callspec._arg2scopenum.get(argname) - if paramscopenum is not None: - scope = scopes[paramscopenum] - - subrequest = SubRequest(self, scope, param, param_index, fixturedef) - - # check if a higher-level scoped fixture accesses a lower level one - subrequest._check_scope(argname, self.scope, scope) - - # clear sys.exc_info before invoking the fixture (python bug?) 
- # if it's not explicitly cleared it will leak into the call - exc_clear() - try: - # call the fixture function - fixturedef.execute(request=subrequest) - finally: - self._schedule_finalizers(fixturedef, subrequest) - - def _schedule_finalizers(self, fixturedef, subrequest): - # if fixture function failed it might have registered finalizers - self.session._setupstate.addfinalizer( - functools.partial(fixturedef.finish, request=subrequest), subrequest.node - ) - - def _check_scope(self, argname, invoking_scope, requested_scope): - if argname == "request": - return - if scopemismatch(invoking_scope, requested_scope): - # try to report something helpful - lines = self._factorytraceback() - fail( - "ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" - % ((requested_scope, argname, invoking_scope, "\n".join(lines))), - pytrace=False, - ) - - def _factorytraceback(self): - lines = [] - for fixturedef in self._get_fixturestack(): - factory = fixturedef.func - fs, lineno = getfslineno(factory) - p = self._pyfuncitem.session.fspath.bestrelpath(fs) - args = _format_args(factory) - lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args)) - return lines - - def _getscopeitem(self, scope): - if scope == "function": - # this might also be a non-function Item despite its attribute name - return self._pyfuncitem - if scope == "package": - node = get_scope_package(self._pyfuncitem, self._fixturedef) - else: - node = get_scope_node(self._pyfuncitem, scope) - if node is None and scope == "class": - # fallback to function item itself - node = self._pyfuncitem - assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( - scope, self._pyfuncitem - ) - return node - - def __repr__(self): - return "<FixtureRequest for %r>" % (self.node) - - -class SubRequest(FixtureRequest): - """ a sub request for handling getting a fixture from a - test function/fixture. """ - - def __init__(self, request, scope, param, param_index, fixturedef): - self._parent_request = request - self.fixturename = fixturedef.argname - if param is not NOTSET: - self.param = param - self.param_index = param_index - self.scope = scope - self._fixturedef = fixturedef - self._pyfuncitem = request._pyfuncitem - self._fixture_defs = request._fixture_defs - self._arg2fixturedefs = request._arg2fixturedefs - self._arg2index = request._arg2index - self._fixturemanager = request._fixturemanager - - def __repr__(self): - return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem) - - def addfinalizer(self, finalizer): - self._fixturedef.addfinalizer(finalizer) - - def _schedule_finalizers(self, fixturedef, subrequest): - # if the executing fixturedef was not explicitly requested in the argument list (via - # getfixturevalue inside the fixture call) then ensure this fixture def will be finished - # first - if fixturedef.argname not in self.funcargnames: - fixturedef.addfinalizer( - functools.partial(self._fixturedef.finish, request=self) - ) - super(SubRequest, self)._schedule_finalizers(fixturedef, subrequest) - - -scopes = "session package module class function".split() -scopenum_function = scopes.index("function") - - -def scopemismatch(currentscope, newscope): - return scopes.index(newscope) > scopes.index(currentscope) - - -def scope2index(scope, descr, where=None): - """Look up the index of ``scope`` and raise a descriptive value error - if not defined. 
- """ - try: - return scopes.index(scope) - except ValueError: - fail( - "{} {}got an unexpected scope value '{}'".format( - descr, "from {} ".format(where) if where else "", scope - ), - pytrace=False, - ) - - -class FixtureLookupError(LookupError): - """ could not return a requested Fixture (missing or invalid). """ - - def __init__(self, argname, request, msg=None): - self.argname = argname - self.request = request - self.fixturestack = request._get_fixturestack() - self.msg = msg - - def formatrepr(self): - tblines = [] - addline = tblines.append - stack = [self.request._pyfuncitem.obj] - stack.extend(map(lambda x: x.func, self.fixturestack)) - msg = self.msg - if msg is not None: - # the last fixture raise an error, let's present - # it at the requesting side - stack = stack[:-1] - for function in stack: - fspath, lineno = getfslineno(function) - try: - lines, _ = inspect.getsourcelines(get_real_func(function)) - except (IOError, IndexError, TypeError): - error_msg = "file %s, line %s: source code not available" - addline(error_msg % (fspath, lineno + 1)) - else: - addline("file %s, line %s" % (fspath, lineno + 1)) - for i, line in enumerate(lines): - line = line.rstrip() - addline(" " + line) - if line.lstrip().startswith("def"): - break - - if msg is None: - fm = self.request._fixturemanager - available = set() - parentid = self.request._pyfuncitem.parent.nodeid - for name, fixturedefs in fm._arg2fixturedefs.items(): - faclist = list(fm._matchfactories(fixturedefs, parentid)) - if faclist: - available.add(name) - if self.argname in available: - msg = " recursive dependency involving fixture '{}' detected".format( - self.argname - ) - else: - msg = "fixture '{}' not found".format(self.argname) - msg += "\n available fixtures: {}".format(", ".join(sorted(available))) - msg += "\n use 'pytest --fixtures [testpath]' for help on them." 
- - return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) - - -class FixtureLookupErrorRepr(TerminalRepr): - def __init__(self, filename, firstlineno, tblines, errorstring, argname): - self.tblines = tblines - self.errorstring = errorstring - self.filename = filename - self.firstlineno = firstlineno - self.argname = argname - - def toterminal(self, tw): - # tw.line("FixtureLookupError: %s" %(self.argname), red=True) - for tbline in self.tblines: - tw.line(tbline.rstrip()) - lines = self.errorstring.split("\n") - if lines: - tw.line( - "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), - red=True, - ) - for line in lines[1:]: - tw.line( - "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), - red=True, - ) - tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) - - -def fail_fixturefunc(fixturefunc, msg): - fs, lineno = getfslineno(fixturefunc) - location = "%s:%s" % (fs, lineno + 1) - source = _pytest._code.Source(fixturefunc) - fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) - - -def call_fixture_func(fixturefunc, request, kwargs): - yieldctx = is_generator(fixturefunc) - if yieldctx: - it = fixturefunc(**kwargs) - res = next(it) - finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) - request.addfinalizer(finalizer) - else: - res = fixturefunc(**kwargs) - return res - - -def _teardown_yield_fixture(fixturefunc, it): - """Executes the teardown of a fixture function by advancing the iterator after the - yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" - try: - next(it) - except StopIteration: - pass - else: - fail_fixturefunc( - fixturefunc, "yield_fixture function has more than one 'yield'" - ) - - -class FixtureDef(object): - """ A container for a factory definition. """ - - def __init__( - self, - fixturemanager, - baseid, - argname, - func, - scope, - params, - unittest=False, - ids=None, - ): - self._fixturemanager = fixturemanager - self.baseid = baseid or "" - self.has_location = baseid is not None - self.func = func - self.argname = argname - self.scope = scope - self.scopenum = scope2index( - scope or "function", - descr="Fixture '{}'".format(func.__name__), - where=baseid, - ) - self.params = params - self.argnames = getfuncargnames(func, is_method=unittest) - self.unittest = unittest - self.ids = ids - self._finalizers = [] - - def addfinalizer(self, finalizer): - self._finalizers.append(finalizer) - - def finish(self, request): - exceptions = [] - try: - while self._finalizers: - try: - func = self._finalizers.pop() - func() - except: # noqa - exceptions.append(sys.exc_info()) - if exceptions: - e = exceptions[0] - # Ensure to not keep frame references through traceback. 
- del exceptions - six.reraise(*e) - finally: - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) - hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) - # even if finalization fails, we invalidate - # the cached fixture value and remove - # all finalizers because they may be bound methods which will - # keep instances alive - if hasattr(self, "cached_result"): - del self.cached_result - self._finalizers = [] - - def execute(self, request): - # get required arguments and register our own finish() - # with their finalization - for argname in self.argnames: - fixturedef = request._get_active_fixturedef(argname) - if argname != "request": - fixturedef.addfinalizer(functools.partial(self.finish, request=request)) - - my_cache_key = request.param_index - cached_result = getattr(self, "cached_result", None) - if cached_result is not None: - result, cache_key, err = cached_result - if my_cache_key == cache_key: - if err is not None: - six.reraise(*err) - else: - return result - # we have a previous but differently parametrized fixture instance - # so we need to tear it down before creating a new one - self.finish(request) - assert not hasattr(self, "cached_result") - - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) - return hook.pytest_fixture_setup(fixturedef=self, request=request) - - def __repr__(self): - return "<FixtureDef argname=%r scope=%r baseid=%r>" % ( - self.argname, - self.scope, - self.baseid, - ) - - -def resolve_fixture_function(fixturedef, request): - """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific - instances and bound methods. - """ - fixturefunc = fixturedef.func - if fixturedef.unittest: - if request.instance is not None: - # bind the unbound method to the TestCase instance - fixturefunc = fixturedef.func.__get__(request.instance) - else: - # the fixture function needs to be bound to the actual - # request.instance so that code working with "fixturedef" behaves - # as expected. - if request.instance is not None: - fixturefunc = getimfunc(fixturedef.func) - if fixturefunc != fixturedef.func: - fixturefunc = fixturefunc.__get__(request.instance) - return fixturefunc - - -def pytest_fixture_setup(fixturedef, request): - """ Execution of fixture setup. """ - kwargs = {} - for argname in fixturedef.argnames: - fixdef = request._get_active_fixturedef(argname) - result, arg_cache_key, exc = fixdef.cached_result - request._check_scope(argname, request.scope, fixdef.scope) - kwargs[argname] = result - - fixturefunc = resolve_fixture_function(fixturedef, request) - my_cache_key = request.param_index - try: - result = call_fixture_func(fixturefunc, request, kwargs) - except TEST_OUTCOME: - fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) - raise - fixturedef.cached_result = (result, my_cache_key, None) - return result - - -def _ensure_immutable_ids(ids): - if ids is None: - return - if callable(ids): - return ids - return tuple(ids) - - -def wrap_function_to_error_out_if_called_directly(function, fixture_marker): - """Wrap the given fixture function so we can raise an error about it being called directly, - instead of used as an argument in a test function. 
- """ - message = FIXTURE_FUNCTION_CALL.format( - name=fixture_marker.name or function.__name__ - ) - - @six.wraps(function) - def result(*args, **kwargs): - fail(message, pytrace=False) - - # keep reference to the original function in our own custom attribute so we don't unwrap - # further than this point and lose useful wrappings like @mock.patch (#3774) - result.__pytest_wrapped__ = _PytestWrapper(function) - - return result - - -@attr.s(frozen=True) -class FixtureFunctionMarker(object): - scope = attr.ib() - params = attr.ib(converter=attr.converters.optional(tuple)) - autouse = attr.ib(default=False) - ids = attr.ib(default=None, converter=_ensure_immutable_ids) - name = attr.ib(default=None) - - def __call__(self, function): - if isclass(function): - raise ValueError("class fixtures not supported (maybe in the future)") - - if getattr(function, "_pytestfixturefunction", False): - raise ValueError( - "fixture is being applied more than once to the same function" - ) - - function = wrap_function_to_error_out_if_called_directly(function, self) - - name = self.name or function.__name__ - if name == "request": - warnings.warn(FIXTURE_NAMED_REQUEST) - function._pytestfixturefunction = self - return function - - -def fixture(scope="function", params=None, autouse=False, ids=None, name=None): - """Decorator to mark a fixture factory function. - - This decorator can be used, with or without parameters, to define a - fixture function. - - The name of the fixture function can later be referenced to cause its - invocation ahead of running tests: test - modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` - marker. - - Test functions can directly use fixture names as input - arguments in which case the fixture instance returned from the fixture - function will be injected. - - Fixtures can provide their values to test functions using ``return`` or ``yield`` - statements. When using ``yield`` the code block after the ``yield`` statement is executed - as teardown code regardless of the test outcome, and must yield exactly once. - - :arg scope: the scope for which this fixture is shared, one of - ``"function"`` (default), ``"class"``, ``"module"``, - ``"package"`` or ``"session"``. - - ``"package"`` is considered **experimental** at this time. - - :arg params: an optional list of parameters which will cause multiple - invocations of the fixture function and all of the tests - using it. - The current parameter is available in ``request.param``. - - :arg autouse: if True, the fixture func is activated for all tests that - can see it. If False (the default) then an explicit - reference is needed to activate the fixture. - - :arg ids: list of string ids each corresponding to the params - so that they are part of the test id. If no ids are provided - they will be generated automatically from the params. - - :arg name: the name of the fixture. This defaults to the name of the - decorated function. If a fixture is used in the same module in - which it is defined, the function name of the fixture will be - shadowed by the function arg that requests the fixture; one way - to resolve this is to name the decorated function - ``fixture_<fixturename>`` and then use - ``@pytest.fixture(name='<fixturename>')``. 
- """ - if callable(scope) and params is None and autouse is False: - # direct decoration - return FixtureFunctionMarker("function", params, autouse, name=name)(scope) - if params is not None and not isinstance(params, (list, tuple)): - params = list(params) - return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) - - -def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): - """ (return a) decorator to mark a yield-fixture factory function. - - .. deprecated:: 3.0 - Use :py:func:`pytest.fixture` directly instead. - """ - return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name) - - -defaultfuncargprefixmarker = fixture() - - -@fixture(scope="session") -def pytestconfig(request): - """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. - - Example:: - - def test_foo(pytestconfig): - if pytestconfig.getoption("verbose") > 0: - ... - - """ - return request.config - - -def pytest_addoption(parser): - parser.addini( - "usefixtures", - type="args", - default=[], - help="list of default fixtures to be used with this project", - ) - - -class FixtureManager(object): - """ - pytest fixtures definitions and information is stored and managed - from this class. - - During collection fm.parsefactories() is called multiple times to parse - fixture function definitions into FixtureDef objects and internal - data structures. - - During collection of test functions, metafunc-mechanics instantiate - a FuncFixtureInfo object which is cached per node/func-name. - This FuncFixtureInfo object is later retrieved by Function nodes - which themselves offer a fixturenames attribute. - - The FuncFixtureInfo object holds information about fixtures and FixtureDefs - relevant for a particular function. An initial list of fixtures is - assembled like this: - - - ini-defined usefixtures - - autouse-marked fixtures along the collection chain up from the function - - usefixtures markers at module/class/function level - - test function funcargs - - Subsequently the funcfixtureinfo.fixturenames attribute is computed - as the closure of the fixtures needed to setup the initial fixtures, - i. e. fixtures needed by fixture functions themselves are appended - to the fixturenames list. - - Upon the test-setup phases all fixturenames are instantiated, retrieved - by a lookup of their FuncFixtureInfo. 
- """ - - FixtureLookupError = FixtureLookupError - FixtureLookupErrorRepr = FixtureLookupErrorRepr - - def __init__(self, session): - self.session = session - self.config = session.config - self._arg2fixturedefs = {} - self._holderobjseen = set() - self._arg2finish = {} - self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] - session.config.pluginmanager.register(self, "funcmanage") - - def _get_direct_parametrize_args(self, node): - """This function returns all the direct parametrization - arguments of a node, so we don't mistake them for fixtures - - Check https://github.com/pytest-dev/pytest/issues/5036 - - This things are done later as well when dealing with parametrization - so this could be improved - """ - from _pytest.mark import ParameterSet - - parametrize_argnames = [] - for marker in node.iter_markers(name="parametrize"): - if not marker.kwargs.get("indirect", False): - p_argnames, _ = ParameterSet._parse_parametrize_args( - *marker.args, **marker.kwargs - ) - parametrize_argnames.extend(p_argnames) - - return parametrize_argnames - - def getfixtureinfo(self, node, func, cls, funcargs=True): - if funcargs and not getattr(node, "nofuncargs", False): - argnames = getfuncargnames(func, cls=cls) - else: - argnames = () - - usefixtures = itertools.chain.from_iterable( - mark.args for mark in node.iter_markers(name="usefixtures") - ) - initialnames = tuple(usefixtures) + argnames - fm = node.session._fixturemanager - initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( - initialnames, node, ignore_args=self._get_direct_parametrize_args(node) - ) - return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) - - def pytest_plugin_registered(self, plugin): - nodeid = None - try: - p = py.path.local(plugin.__file__).realpath() - except AttributeError: - pass - else: - # construct the base nodeid which is later used to check - # what fixtures are visible for particular tests (as denoted - # by their test id) - if p.basename.startswith("conftest.py"): - nodeid = p.dirpath().relto(self.config.rootdir) - if p.sep != nodes.SEP: - nodeid = nodeid.replace(p.sep, nodes.SEP) - - self.parsefactories(plugin, nodeid) - - def _getautousenames(self, nodeid): - """ return a tuple of fixture names to be used. """ - autousenames = [] - for baseid, basenames in self._nodeid_and_autousenames: - if nodeid.startswith(baseid): - if baseid: - i = len(baseid) - nextchar = nodeid[i : i + 1] - if nextchar and nextchar not in ":/": - continue - autousenames.extend(basenames) - return autousenames - - def getfixtureclosure(self, fixturenames, parentnode, ignore_args=()): - # collect the closure of all fixtures , starting with the given - # fixturenames as the initial set. As we have to visit all - # factory definitions anyway, we also return an arg2fixturedefs - # mapping so that the caller can reuse it and does not have - # to re-discover fixturedefs again for each fixturename - # (discovering matching fixtures for a given name/node is expensive) - - parentid = parentnode.nodeid - fixturenames_closure = self._getautousenames(parentid) - - def merge(otherlist): - for arg in otherlist: - if arg not in fixturenames_closure: - fixturenames_closure.append(arg) - - merge(fixturenames) - - # at this point, fixturenames_closure contains what we call "initialnames", - # which is a set of fixturenames the function immediately requests. We - # need to return it as well, so save this. 
- initialnames = tuple(fixturenames_closure) - - arg2fixturedefs = {} - lastlen = -1 - while lastlen != len(fixturenames_closure): - lastlen = len(fixturenames_closure) - for argname in fixturenames_closure: - if argname in ignore_args: - continue - if argname in arg2fixturedefs: - continue - fixturedefs = self.getfixturedefs(argname, parentid) - if fixturedefs: - arg2fixturedefs[argname] = fixturedefs - merge(fixturedefs[-1].argnames) - - def sort_by_scope(arg_name): - try: - fixturedefs = arg2fixturedefs[arg_name] - except KeyError: - return scopes.index("function") - else: - return fixturedefs[-1].scopenum - - fixturenames_closure.sort(key=sort_by_scope) - return initialnames, fixturenames_closure, arg2fixturedefs - - def pytest_generate_tests(self, metafunc): - for argname in metafunc.fixturenames: - faclist = metafunc._arg2fixturedefs.get(argname) - if faclist: - fixturedef = faclist[-1] - if fixturedef.params is not None: - markers = list(metafunc.definition.iter_markers("parametrize")) - for parametrize_mark in markers: - if "argnames" in parametrize_mark.kwargs: - argnames = parametrize_mark.kwargs["argnames"] - else: - argnames = parametrize_mark.args[0] - - if not isinstance(argnames, (tuple, list)): - argnames = [ - x.strip() for x in argnames.split(",") if x.strip() - ] - if argname in argnames: - break - else: - metafunc.parametrize( - argname, - fixturedef.params, - indirect=True, - scope=fixturedef.scope, - ids=fixturedef.ids, - ) - else: - continue # will raise FixtureLookupError at setup time - - def pytest_collection_modifyitems(self, items): - # separate parametrized setups - items[:] = reorder_items(items) - - def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): - if nodeid is not NOTSET: - holderobj = node_or_obj - else: - holderobj = node_or_obj.obj - nodeid = node_or_obj.nodeid - if holderobj in self._holderobjseen: - return - - self._holderobjseen.add(holderobj) - autousenames = [] - for name in dir(holderobj): - # The attribute can be an arbitrary descriptor, so the attribute - # access below can raise. safe_getatt() ignores such exceptions. - obj = safe_getattr(holderobj, name, None) - marker = getfixturemarker(obj) - if not isinstance(marker, FixtureFunctionMarker): - # magic globals with __getattr__ might have got us a wrong - # fixture attribute - continue - - if marker.name: - name = marker.name - - # during fixture definition we wrap the original fixture function - # to issue a warning if called directly, so here we unwrap it in order to not emit the warning - # when pytest itself calls the fixture function - if six.PY2 and unittest: - # hack on Python 2 because of the unbound methods - obj = get_real_func(obj) - else: - obj = get_real_method(obj, holderobj) - - fixture_def = FixtureDef( - self, - nodeid, - name, - obj, - marker.scope, - marker.params, - unittest=unittest, - ids=marker.ids, - ) - - faclist = self._arg2fixturedefs.setdefault(name, []) - if fixture_def.has_location: - faclist.append(fixture_def) - else: - # fixturedefs with no location are at the front - # so this inserts the current fixturedef after the - # existing fixturedefs from external plugins but - # before the fixturedefs provided in conftests. 
- i = len([f for f in faclist if not f.has_location]) - faclist.insert(i, fixture_def) - if marker.autouse: - autousenames.append(name) - - if autousenames: - self._nodeid_and_autousenames.append((nodeid or "", autousenames)) - - def getfixturedefs(self, argname, nodeid): - """ - Gets a list of fixtures which are applicable to the given node id. - - :param str argname: name of the fixture to search for - :param str nodeid: full node id of the requesting test. - :return: list[FixtureDef] - """ - try: - fixturedefs = self._arg2fixturedefs[argname] - except KeyError: - return None - return tuple(self._matchfactories(fixturedefs, nodeid)) - - def _matchfactories(self, fixturedefs, nodeid): - for fixturedef in fixturedefs: - if nodes.ischildnode(fixturedef.baseid, nodeid): - yield fixturedef diff --git a/contrib/python/pytest/py2/_pytest/freeze_support.py b/contrib/python/pytest/py2/_pytest/freeze_support.py deleted file mode 100644 index aeeec2a56b..0000000000 --- a/contrib/python/pytest/py2/_pytest/freeze_support.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Provides a function to report all internal modules for using freezing tools -pytest -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -def freeze_includes(): - """ - Returns a list of module names used by pytest that should be - included by cx_freeze. - """ - import py - import _pytest - - result = list(_iter_all_modules(py)) - result += list(_iter_all_modules(_pytest)) - return result - - -def _iter_all_modules(package, prefix=""): - """ - Iterates over the names of all modules that can be found in the given - package, recursively. - Example: - _iter_all_modules(_pytest) -> - ['_pytest.assertion.newinterpret', - '_pytest.capture', - '_pytest.core', - ... - ] - """ - import os - import pkgutil - - if type(package) is not str: - path, prefix = package.__path__[0], package.__name__ + "." - else: - path = package - for _, name, is_package in pkgutil.iter_modules([path]): - if is_package: - for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): - yield prefix + m - else: - yield prefix + name diff --git a/contrib/python/pytest/py2/_pytest/helpconfig.py b/contrib/python/pytest/py2/_pytest/helpconfig.py deleted file mode 100644 index 5681160160..0000000000 --- a/contrib/python/pytest/py2/_pytest/helpconfig.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -""" version info, help messages, tracing configuration. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import sys -from argparse import Action - -import py - -import pytest -from _pytest.config import PrintHelp - - -class HelpAction(Action): - """This is an argparse Action that will raise an exception in - order to skip the rest of the argument parsing when --help is passed. - This prevents argparse from quitting due to missing required arguments - when any are defined, for example by ``pytest_addoption``. - This is similar to the way that the builtin argparse --help option is - implemented by raising SystemExit. 
- """ - - def __init__(self, option_strings, dest=None, default=False, help=None): - super(HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=True, - default=default, - nargs=0, - help=help, - ) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, self.const) - - # We should only skip the rest of the parsing after preparse is done - if getattr(parser._parser, "after_preparse", False): - raise PrintHelp - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--version", - action="store_true", - help="display pytest lib version and import information.", - ) - group._addoption( - "-h", - "--help", - action=HelpAction, - dest="help", - help="show help message and configuration info", - ) - group._addoption( - "-p", - action="append", - dest="plugins", - default=[], - metavar="name", - help="early-load given plugin module name or entry point (multi-allowed). " - "To avoid loading of plugins, use the `no:` prefix, e.g. " - "`no:doctest`.", - ) - group.addoption( - "--traceconfig", - "--trace-config", - action="store_true", - default=False, - help="trace considerations of conftest.py files.", - ), - group.addoption( - "--debug", - action="store_true", - dest="debug", - default=False, - help="store internal tracing debug information in 'pytestdebug.log'.", - ) - group._addoption( - "-o", - "--override-ini", - dest="override_ini", - action="append", - help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_cmdline_parse(): - outcome = yield - config = outcome.get_result() - if config.option.debug: - path = os.path.abspath("pytestdebug.log") - debugfile = open(path, "w") - debugfile.write( - "versions pytest-%s, py-%s, " - "python-%s\ncwd=%s\nargs=%s\n\n" - % ( - pytest.__version__, - py.__version__, - ".".join(map(str, sys.version_info)), - os.getcwd(), - config._origargs, - ) - ) - config.trace.root.setwriter(debugfile.write) - undo_tracing = config.pluginmanager.enable_tracing() - sys.stderr.write("writing pytestdebug information to %s\n" % path) - - def unset_tracing(): - debugfile.close() - sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) - config.trace.root.setwriter(None) - undo_tracing() - - config.add_cleanup(unset_tracing) - - -def showversion(config): - p = py.path.local(pytest.__file__) - sys.stderr.write( - "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) - ) - plugininfo = getpluginversioninfo(config) - if plugininfo: - for line in plugininfo: - sys.stderr.write(line + "\n") - - -def pytest_cmdline_main(config): - if config.option.version: - showversion(config) - return 0 - elif config.option.help: - config._do_configure() - showhelp(config) - config._ensure_unconfigure() - return 0 - - -def showhelp(config): - import textwrap - - reporter = config.pluginmanager.get_plugin("terminalreporter") - tw = reporter._tw - tw.write(config._parser.optparser.format_help()) - tw.line() - tw.line( - "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" - ) - tw.line() - - columns = tw.fullwidth # costly call - indent_len = 24 # based on argparse's max_help_position=24 - indent = " " * indent_len - for name in config._parser._ininames: - help, type, default = config._parser._inidict[name] - if type is None: - type = "string" - spec = "%s (%s):" % (name, type) - tw.write(" %s" % spec) - spec_len = 
len(spec) - if spec_len > (indent_len - 3): - # Display help starting at a new line. - tw.line() - helplines = textwrap.wrap( - help, - columns, - initial_indent=indent, - subsequent_indent=indent, - break_on_hyphens=False, - ) - - for line in helplines: - tw.line(line) - else: - # Display help starting after the spec, following lines indented. - tw.write(" " * (indent_len - spec_len - 2)) - wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) - - tw.line(wrapped[0]) - for line in wrapped[1:]: - tw.line(indent + line) - - tw.line() - tw.line("environment variables:") - vars = [ - ("PYTEST_ADDOPTS", "extra command line options"), - ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), - ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), - ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), - ] - for name, help in vars: - tw.line(" %-24s %s" % (name, help)) - tw.line() - tw.line() - - tw.line("to see available markers type: pytest --markers") - tw.line("to see available fixtures type: pytest --fixtures") - tw.line( - "(shown according to specified file_or_dir or current dir " - "if not specified; fixtures with leading '_' are only shown " - "with the '-v' option" - ) - - for warningreport in reporter.stats.get("warnings", []): - tw.line("warning : " + warningreport.message, red=True) - return - - -conftest_options = [("pytest_plugins", "list of plugin names to load")] - - -def getpluginversioninfo(config): - lines = [] - plugininfo = config.pluginmanager.list_plugin_distinfo() - if plugininfo: - lines.append("setuptools registered plugins:") - for plugin, dist in plugininfo: - loc = getattr(plugin, "__file__", repr(plugin)) - content = "%s-%s at %s" % (dist.project_name, dist.version, loc) - lines.append(" " + content) - return lines - - -def pytest_report_header(config): - lines = [] - if config.option.debug or config.option.traceconfig: - lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) - - verinfo = getpluginversioninfo(config) - if verinfo: - lines.extend(verinfo) - - if config.option.traceconfig: - lines.append("active plugins:") - items = config.pluginmanager.list_name_plugin() - for name, plugin in items: - if hasattr(plugin, "__file__"): - r = plugin.__file__ - else: - r = repr(plugin) - lines.append(" %-20s: %s" % (name, r)) - return lines diff --git a/contrib/python/pytest/py2/_pytest/hookspec.py b/contrib/python/pytest/py2/_pytest/hookspec.py deleted file mode 100644 index 7ab6154b17..0000000000 --- a/contrib/python/pytest/py2/_pytest/hookspec.py +++ /dev/null @@ -1,639 +0,0 @@ -# -*- coding: utf-8 -*- -""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ -from pluggy import HookspecMarker - -from _pytest.deprecated import PYTEST_LOGWARNING - -hookspec = HookspecMarker("pytest") - -# ------------------------------------------------------------------------- -# Initialization hooks called for every plugin -# ------------------------------------------------------------------------- - - -@hookspec(historic=True) -def pytest_addhooks(pluginmanager): - """called at plugin registration time to allow adding new hooks via a call to - ``pluginmanager.add_hookspecs(module_or_class, prefix)``. - - - :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager - - .. note:: - This hook is incompatible with ``hookwrapper=True``. 
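A hedged sketch of how a plugin would typically use this hook to publish new hook specifications; the hook name and spec class are made up for illustration::

    # conftest.py (or any plugin module)
    class ProjectHookSpecs(object):
        def pytest_project_result_published(self, result):
            """Specification only; implementations may live in other plugins."""

    def pytest_addhooks(pluginmanager):
        # After this call, plugins can implement pytest_project_result_published
        # and it becomes callable through config.hook / pluginmanager.hook.
        pluginmanager.add_hookspecs(ProjectHookSpecs)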
- """ - - -@hookspec(historic=True) -def pytest_plugin_registered(plugin, manager): - """ a new pytest plugin got registered. - - :param plugin: the plugin module or instance - :param _pytest.config.PytestPluginManager manager: pytest plugin manager - - .. note:: - This hook is incompatible with ``hookwrapper=True``. - """ - - -@hookspec(historic=True) -def pytest_addoption(parser): - """register argparse-style options and ini-style config values, - called once at the beginning of a test run. - - .. note:: - - This function should be implemented only in plugins or ``conftest.py`` - files situated at the tests root directory due to how pytest - :ref:`discovers plugins during startup <pluginorder>`. - - :arg _pytest.config.Parser parser: To add command line options, call - :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. - To add ini-file values call :py:func:`parser.addini(...) - <_pytest.config.Parser.addini>`. - - Options can later be accessed through the - :py:class:`config <_pytest.config.Config>` object, respectively: - - - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to - retrieve the value of a command line option. - - - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve - a value read from an ini-style file. - - The config object is passed around on many internal objects via the ``.config`` - attribute or can be retrieved as the ``pytestconfig`` fixture. - - .. note:: - This hook is incompatible with ``hookwrapper=True``. - """ - - -@hookspec(historic=True) -def pytest_configure(config): - """ - Allows plugins and conftest files to perform initial configuration. - - This hook is called for every plugin and initial conftest file - after command line options have been parsed. - - After that, the hook is called for other conftest files as they are - imported. - - .. note:: - This hook is incompatible with ``hookwrapper=True``. - - :arg _pytest.config.Config config: pytest config object - """ - - -# ------------------------------------------------------------------------- -# Bootstrapping hooks called for plugins registered early enough: -# internal and 3rd party plugins. -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_cmdline_parse(pluginmanager, args): - """return initialized config object, parsing the specified args. - - Stops at first non-None result, see :ref:`firstresult` - - .. note:: - This hook will only be called for plugin classes passed to the ``plugins`` arg when using `pytest.main`_ to - perform an in-process test run. - - :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager - :param list[str] args: list of arguments passed on the command line - """ - - -def pytest_cmdline_preparse(config, args): - """(**Deprecated**) modify command line arguments before option parsing. - - This hook is considered deprecated and will be removed in a future pytest version. Consider - using :func:`pytest_load_initial_conftests` instead. - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - :param _pytest.config.Config config: pytest config object - :param list[str] args: list of arguments passed on the command line - """ - - -@hookspec(firstresult=True) -def pytest_cmdline_main(config): - """ called for performing the main command line action. The default - implementation will invoke the configure hooks and runtest_mainloop. - - .. 
note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.config.Config config: pytest config object - """ - - -def pytest_load_initial_conftests(early_config, parser, args): - """ implements the loading of initial conftest files ahead - of command line option parsing. - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - :param _pytest.config.Config early_config: pytest config object - :param list[str] args: list of arguments passed on the command line - :param _pytest.config.Parser parser: to add command line options - """ - - -# ------------------------------------------------------------------------- -# collection hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_collection(session): - """Perform the collection protocol for the given session. - - Stops at first non-None result, see :ref:`firstresult`. - - :param _pytest.main.Session session: the pytest session object - """ - - -def pytest_collection_modifyitems(session, config, items): - """ called after collection has been performed, may filter or re-order - the items in-place. - - :param _pytest.main.Session session: the pytest session object - :param _pytest.config.Config config: pytest config object - :param List[_pytest.nodes.Item] items: list of item objects - """ - - -def pytest_collection_finish(session): - """ called after collection has been performed and modified. - - :param _pytest.main.Session session: the pytest session object - """ - - -@hookspec(firstresult=True) -def pytest_ignore_collect(path, config): - """ return True to prevent considering this path for collection. - This hook is consulted for all files and directories prior to calling - more specific hooks. - - Stops at first non-None result, see :ref:`firstresult` - - :param path: a :py:class:`py.path.local` - the path to analyze - :param _pytest.config.Config config: pytest config object - """ - - -@hookspec(firstresult=True) -def pytest_collect_directory(path, parent): - """ called before traversing a directory for collection files. - - Stops at first non-None result, see :ref:`firstresult` - - :param path: a :py:class:`py.path.local` - the path to analyze - """ - - -def pytest_collect_file(path, parent): - """ return collection Node or None for the given path. Any new node - needs to have the specified ``parent`` as a parent. - - :param path: a :py:class:`py.path.local` - the path to collect - """ - - -# logging hooks for collection - - -def pytest_collectstart(collector): - """ collector starts collecting. """ - - -def pytest_itemcollected(item): - """ we just collected a test item. """ - - -def pytest_collectreport(report): - """ collector finished collecting. """ - - -def pytest_deselected(items): - """ called for test items deselected, e.g. by keyword. """ - - -@hookspec(firstresult=True) -def pytest_make_collect_report(collector): - """ perform ``collector.collect()`` and return a CollectReport. - - Stops at first non-None result, see :ref:`firstresult` """ - - -# ------------------------------------------------------------------------- -# Python test function related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_pycollect_makemodule(path, parent): - """ return a Module collector or None for the given path. 
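As a concrete (illustrative) use of the collection hooks above, a ``conftest.py`` can reorder the collected items in place via ``pytest_collection_modifyitems``; the ``slow`` marker is an assumption of this sketch::

    def pytest_collection_modifyitems(session, config, items):
        # Stable sort: unmarked tests keep their relative order,
        # tests marked @pytest.mark.slow are pushed to the end of the run.
        items.sort(key=lambda item: 1 if item.get_closest_marker("slow") else 0)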
- This hook will be called for each matching test module path. - The pytest_collect_file hook needs to be used if you want to - create test modules for files that do not match as a test module. - - Stops at first non-None result, see :ref:`firstresult` - - :param path: a :py:class:`py.path.local` - the path of module to collect - """ - - -@hookspec(firstresult=True) -def pytest_pycollect_makeitem(collector, name, obj): - """ return custom item/collector for a python object in a module, or None. - - Stops at first non-None result, see :ref:`firstresult` """ - - -@hookspec(firstresult=True) -def pytest_pyfunc_call(pyfuncitem): - """ call underlying test function. - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_generate_tests(metafunc): - """ generate (multiple) parametrized calls to a test function.""" - - -@hookspec(firstresult=True) -def pytest_make_parametrize_id(config, val, argname): - """Return a user-friendly string representation of the given ``val`` that will be used - by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. - The parameter name is available as ``argname``, if required. - - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.config.Config config: pytest config object - :param val: the parametrized value - :param str argname: the automatic parameter name produced by pytest - """ - - -# ------------------------------------------------------------------------- -# generic runtest related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_runtestloop(session): - """ called for performing the main runtest loop - (after collection finished). - - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.main.Session session: the pytest session object - """ - - -def pytest_itemstart(item, node): - """(**Deprecated**) use pytest_runtest_logstart. """ - - -@hookspec(firstresult=True) -def pytest_runtest_protocol(item, nextitem): - """ implements the runtest_setup/call/teardown protocol for - the given test item, including capturing exceptions and calling - reporting hooks. - - :arg item: test item for which the runtest protocol is performed. - - :arg nextitem: the scheduled-to-be-next test item (or None if this - is the end my friend). This argument is passed on to - :py:func:`pytest_runtest_teardown`. - - :return boolean: True if no further hook implementations should be invoked. - - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_runtest_logstart(nodeid, location): - """ signal the start of running a single test item. - - This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and - :func:`pytest_runtest_teardown` hooks. - - :param str nodeid: full id of the item - :param location: a triple of ``(filename, linenum, testname)`` - """ - - -def pytest_runtest_logfinish(nodeid, location): - """ signal the complete finish of running a single test item. - - This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and - :func:`pytest_runtest_teardown` hooks. - - :param str nodeid: full id of the item - :param location: a triple of ``(filename, linenum, testname)`` - """ - - -def pytest_runtest_setup(item): - """ called before ``pytest_runtest_call(item)``. """ - - -def pytest_runtest_call(item): - """ called to execute the test ``item``. 
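A short usage sketch for ``pytest_generate_tests`` (declared above); the fixture name and backend values are illustrative::

    # conftest.py
    def pytest_generate_tests(metafunc):
        if "db_backend" in metafunc.fixturenames:
            # Every test requesting db_backend runs once per backend,
            # e.g. test_login[sqlite] and test_login[postgres].
            metafunc.parametrize("db_backend", ["sqlite", "postgres"])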
""" - - -def pytest_runtest_teardown(item, nextitem): - """ called after ``pytest_runtest_call``. - - :arg nextitem: the scheduled-to-be-next test item (None if no further - test item is scheduled). This argument can be used to - perform exact teardowns, i.e. calling just enough finalizers - so that nextitem only needs to call setup-functions. - """ - - -@hookspec(firstresult=True) -def pytest_runtest_makereport(item, call): - """ return a :py:class:`_pytest.runner.TestReport` object - for the given :py:class:`pytest.Item <_pytest.main.Item>` and - :py:class:`_pytest.runner.CallInfo`. - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_runtest_logreport(report): - """ process a test setup/call/teardown report relating to - the respective phase of executing a test. """ - - -@hookspec(firstresult=True) -def pytest_report_to_serializable(config, report): - """ - .. warning:: - This hook is experimental and subject to change between pytest releases, even - bug fixes. - - The intent is for this to be used by plugins maintained by the core-devs, such - as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal - 'resultlog' plugin. - - In the future it might become part of the public hook API. - - Serializes the given report object into a data structure suitable for sending - over the wire, e.g. converted to JSON. - """ - - -@hookspec(firstresult=True) -def pytest_report_from_serializable(config, data): - """ - .. warning:: - This hook is experimental and subject to change between pytest releases, even - bug fixes. - - The intent is for this to be used by plugins maintained by the core-devs, such - as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal - 'resultlog' plugin. - - In the future it might become part of the public hook API. - - Restores a report object previously serialized with pytest_report_to_serializable(). - """ - - -# ------------------------------------------------------------------------- -# Fixture related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_fixture_setup(fixturedef, request): - """ performs fixture setup execution. - - :return: The return value of the call to the fixture function - - Stops at first non-None result, see :ref:`firstresult` - - .. note:: - If the fixture function returns None, other implementations of - this hook function will continue to be called, according to the - behavior of the :ref:`firstresult` option. - """ - - -def pytest_fixture_post_finalizer(fixturedef, request): - """ called after fixture teardown, but before the cache is cleared so - the fixture result cache ``fixturedef.cached_result`` can - still be accessed.""" - - -# ------------------------------------------------------------------------- -# test session related hooks -# ------------------------------------------------------------------------- - - -def pytest_sessionstart(session): - """ called after the ``Session`` object has been created and before performing collection - and entering the run test loop. - - :param _pytest.main.Session session: the pytest session object - """ - - -def pytest_sessionfinish(session, exitstatus): - """ called after whole test run finished, right before returning the exit status to the system. 
- - :param _pytest.main.Session session: the pytest session object - :param int exitstatus: the status which pytest will return to the system - """ - - -def pytest_unconfigure(config): - """ called before test process is exited. - - :param _pytest.config.Config config: pytest config object - """ - - -# ------------------------------------------------------------------------- -# hooks for customizing the assert methods -# ------------------------------------------------------------------------- - - -def pytest_assertrepr_compare(config, op, left, right): - """return explanation for comparisons in failing assert expressions. - - Return None for no custom explanation, otherwise return a list - of strings. The strings will be joined by newlines but any newlines - *in* a string will be escaped. Note that all but the first line will - be indented slightly, the intention is for the first line to be a summary. - - :param _pytest.config.Config config: pytest config object - """ - - -# ------------------------------------------------------------------------- -# hooks for influencing reporting (invoked from _pytest_terminal) -# ------------------------------------------------------------------------- - - -def pytest_report_header(config, startdir): - """ return a string or list of strings to be displayed as header info for terminal reporting. - - :param _pytest.config.Config config: pytest config object - :param startdir: py.path object with the starting dir - - .. note:: - - This function should be implemented only in plugins or ``conftest.py`` - files situated at the tests root directory due to how pytest - :ref:`discovers plugins during startup <pluginorder>`. - """ - - -def pytest_report_collectionfinish(config, startdir, items): - """ - .. versionadded:: 3.2 - - return a string or list of strings to be displayed after collection has finished successfully. - - This strings will be displayed after the standard "collected X items" message. - - :param _pytest.config.Config config: pytest config object - :param startdir: py.path object with the starting dir - :param items: list of pytest items that are going to be executed; this list should not be modified. - """ - - -@hookspec(firstresult=True) -def pytest_report_teststatus(report, config): - """ return result-category, shortletter and verbose word for reporting. - - :param _pytest.config.Config config: pytest config object - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_terminal_summary(terminalreporter, exitstatus, config): - """Add a section to terminal summary reporting. - - :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object - :param int exitstatus: the exit status that will be reported back to the OS - :param _pytest.config.Config config: pytest config object - - .. versionadded:: 4.2 - The ``config`` parameter. - """ - - -@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING) -def pytest_logwarning(message, code, nodeid, fslocation): - """ - .. deprecated:: 3.8 - - This hook is will stop working in a future release. - - pytest no longer triggers this hook, but the - terminal writer still implements it to display warnings issued by - :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be - an error in future releases. - - process a warning specified by a message, a code string, - a nodeid and fslocation (both of which may be None - if the warning is not tied to a particular node/location). - - .. 
note:: - This hook is incompatible with ``hookwrapper=True``. - """ - - -@hookspec(historic=True) -def pytest_warning_captured(warning_message, when, item): - """ - Process a warning captured by the internal pytest warnings plugin. - - :param warnings.WarningMessage warning_message: - The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains - the same attributes as the parameters of :py:func:`warnings.showwarning`. - - :param str when: - Indicates when the warning was captured. Possible values: - - * ``"config"``: during pytest configuration/initialization stage. - * ``"collect"``: during test collection. - * ``"runtest"``: during test execution. - - :param pytest.Item|None item: - **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None`` - in a future release. - - The item being executed if ``when`` is ``"runtest"``, otherwise ``None``. - """ - - -# ------------------------------------------------------------------------- -# doctest hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_doctest_prepare_content(content): - """ return processed content for a given doctest - - Stops at first non-None result, see :ref:`firstresult` """ - - -# ------------------------------------------------------------------------- -# error handling and internal debugging hooks -# ------------------------------------------------------------------------- - - -def pytest_internalerror(excrepr, excinfo): - """ called for internal errors. """ - - -def pytest_keyboard_interrupt(excinfo): - """ called for keyboard interrupt. """ - - -def pytest_exception_interact(node, call, report): - """called when an exception was raised which can potentially be - interactively handled. - - This hook is only called if an exception was raised - that is not an internal exception like ``skip.Exception``. - """ - - -def pytest_enter_pdb(config, pdb): - """ called upon pdb.set_trace(), can be used by plugins to take special - action just before the python debugger enters in interactive mode. - - :param _pytest.config.Config config: pytest config object - :param pdb.Pdb pdb: Pdb instance - """ - - -def pytest_leave_pdb(config, pdb): - """ called when leaving pdb (e.g. with continue after pdb.set_trace()). - - Can be used by plugins to take special action just after the python - debugger leaves interactive mode. - - :param _pytest.config.Config config: pytest config object - :param pdb.Pdb pdb: Pdb instance - """ diff --git a/contrib/python/pytest/py2/_pytest/junitxml.py b/contrib/python/pytest/py2/_pytest/junitxml.py deleted file mode 100644 index 853dcb7744..0000000000 --- a/contrib/python/pytest/py2/_pytest/junitxml.py +++ /dev/null @@ -1,707 +0,0 @@ -# -*- coding: utf-8 -*- -""" - report test results in JUnit-XML format, - for use with Jenkins and build integration servers. - - -Based on initial code from Ross Lawley. 
- -Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ -src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import os -import platform -import re -import sys -import time -from datetime import datetime - -import py -import six - -import pytest -from _pytest import nodes -from _pytest.config import filename_arg - -# Python 2.X and 3.X compatibility -if sys.version_info[0] < 3: - from codecs import open - - -class Junit(py.xml.Namespace): - pass - - -# We need to get the subset of the invalid unicode ranges according to -# XML 1.0 which are valid in this python build. Hence we calculate -# this dynamically instead of hardcoding it. The spec range of valid -# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] -# | [#x10000-#x10FFFF] -_legal_chars = (0x09, 0x0A, 0x0D) -_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) -_legal_xml_re = [ - u"%s-%s" % (six.unichr(low), six.unichr(high)) - for (low, high) in _legal_ranges - if low < sys.maxunicode -] -_legal_xml_re = [six.unichr(x) for x in _legal_chars] + _legal_xml_re -illegal_xml_re = re.compile(u"[^%s]" % u"".join(_legal_xml_re)) -del _legal_chars -del _legal_ranges -del _legal_xml_re - -_py_ext_re = re.compile(r"\.py$") - - -def bin_xml_escape(arg): - def repl(matchobj): - i = ord(matchobj.group()) - if i <= 0xFF: - return u"#x%02X" % i - else: - return u"#x%04X" % i - - return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) - - -def merge_family(left, right): - result = {} - for kl, vl in left.items(): - for kr, vr in right.items(): - if not isinstance(vl, list): - raise TypeError(type(vl)) - result[kl] = vl + vr - left.update(result) - - -families = {} -families["_base"] = {"testcase": ["classname", "name"]} -families["_base_legacy"] = {"testcase": ["file", "line", "url"]} - -# xUnit 1.x inherits legacy attributes -families["xunit1"] = families["_base"].copy() -merge_family(families["xunit1"], families["_base_legacy"]) - -# xUnit 2.x uses strict base attributes -families["xunit2"] = families["_base"] - - -class _NodeReporter(object): - def __init__(self, nodeid, xml): - self.id = nodeid - self.xml = xml - self.add_stats = self.xml.add_stats - self.family = self.xml.family - self.duration = 0 - self.properties = [] - self.nodes = [] - self.testcase = None - self.attrs = {} - - def append(self, node): - self.xml.add_stats(type(node).__name__) - self.nodes.append(node) - - def add_property(self, name, value): - self.properties.append((str(name), bin_xml_escape(value))) - - def add_attribute(self, name, value): - self.attrs[str(name)] = bin_xml_escape(value) - - def make_properties_node(self): - """Return a Junit node containing custom properties, if any. 
- """ - if self.properties: - return Junit.properties( - [ - Junit.property(name=name, value=value) - for name, value in self.properties - ] - ) - return "" - - def record_testreport(self, testreport): - assert not self.testcase - names = mangle_test_address(testreport.nodeid) - existing_attrs = self.attrs - classnames = names[:-1] - if self.xml.prefix: - classnames.insert(0, self.xml.prefix) - attrs = { - "classname": ".".join(classnames), - "name": bin_xml_escape(names[-1]), - "file": testreport.location[0], - } - if testreport.location[1] is not None: - attrs["line"] = testreport.location[1] - if hasattr(testreport, "url"): - attrs["url"] = testreport.url - self.attrs = attrs - self.attrs.update(existing_attrs) # restore any user-defined attributes - - # Preserve legacy testcase behavior - if self.family == "xunit1": - return - - # Filter out attributes not permitted by this test family. - # Including custom attributes because they are not valid here. - temp_attrs = {} - for key in self.attrs.keys(): - if key in families[self.family]["testcase"]: - temp_attrs[key] = self.attrs[key] - self.attrs = temp_attrs - - def to_xml(self): - testcase = Junit.testcase(time="%.3f" % self.duration, **self.attrs) - testcase.append(self.make_properties_node()) - for node in self.nodes: - testcase.append(node) - return testcase - - def _add_simple(self, kind, message, data=None): - data = bin_xml_escape(data) - node = kind(data, message=message) - self.append(node) - - def write_captured_output(self, report): - if not self.xml.log_passing_tests and report.passed: - return - - content_out = report.capstdout - content_log = report.caplog - content_err = report.capstderr - - if content_log or content_out: - if content_log and self.xml.logging == "system-out": - if content_out: - # syncing stdout and the log-output is not done yet. It's - # probably not worth the effort. Therefore, first the captured - # stdout is shown and then the captured logs. 
- content = "\n".join( - [ - " Captured Stdout ".center(80, "-"), - content_out, - "", - " Captured Log ".center(80, "-"), - content_log, - ] - ) - else: - content = content_log - else: - content = content_out - - if content: - tag = getattr(Junit, "system-out") - self.append(tag(bin_xml_escape(content))) - - if content_log or content_err: - if content_log and self.xml.logging == "system-err": - if content_err: - content = "\n".join( - [ - " Captured Stderr ".center(80, "-"), - content_err, - "", - " Captured Log ".center(80, "-"), - content_log, - ] - ) - else: - content = content_log - else: - content = content_err - - if content: - tag = getattr(Junit, "system-err") - self.append(tag(bin_xml_escape(content))) - - def append_pass(self, report): - self.add_stats("passed") - - def append_failure(self, report): - # msg = str(report.longrepr.reprtraceback.extraline) - if hasattr(report, "wasxfail"): - self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly") - else: - if hasattr(report.longrepr, "reprcrash"): - message = report.longrepr.reprcrash.message - elif isinstance(report.longrepr, six.string_types): - message = report.longrepr - else: - message = str(report.longrepr) - message = bin_xml_escape(message) - fail = Junit.failure(message=message) - fail.append(bin_xml_escape(report.longrepr)) - self.append(fail) - - def append_collect_error(self, report): - # msg = str(report.longrepr.reprtraceback.extraline) - self.append( - Junit.error(bin_xml_escape(report.longrepr), message="collection failure") - ) - - def append_collect_skipped(self, report): - self._add_simple(Junit.skipped, "collection skipped", report.longrepr) - - def append_error(self, report): - if report.when == "teardown": - msg = "test teardown failure" - else: - msg = "test setup failure" - self._add_simple(Junit.error, msg, report.longrepr) - - def append_skipped(self, report): - if hasattr(report, "wasxfail"): - xfailreason = report.wasxfail - if xfailreason.startswith("reason: "): - xfailreason = xfailreason[8:] - self.append( - Junit.skipped( - "", type="pytest.xfail", message=bin_xml_escape(xfailreason) - ) - ) - else: - filename, lineno, skipreason = report.longrepr - if skipreason.startswith("Skipped: "): - skipreason = skipreason[9:] - details = "%s:%s: %s" % (filename, lineno, skipreason) - - self.append( - Junit.skipped( - bin_xml_escape(details), - type="pytest.skip", - message=bin_xml_escape(skipreason), - ) - ) - self.write_captured_output(report) - - def finalize(self): - data = self.to_xml().unicode(indent=0) - self.__dict__.clear() - self.to_xml = lambda: py.xml.raw(data) - - -def _warn_incompatibility_with_xunit2(request, fixture_name): - """Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions""" - from _pytest.warning_types import PytestWarning - - xml = getattr(request.config, "_xml", None) - if xml is not None and xml.family not in ("xunit1", "legacy"): - request.node.warn( - PytestWarning( - "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( - fixture_name=fixture_name, family=xml.family - ) - ) - ) - - -@pytest.fixture -def record_property(request): - """Add an extra properties the calling test. - User properties become part of the test report and are available to the - configured reporters, like JUnit XML. - The fixture is callable with ``(name, value)``, with value being automatically - xml-encoded. 
- - Example:: - - def test_function(record_property): - record_property("example_key", 1) - """ - _warn_incompatibility_with_xunit2(request, "record_property") - - def append_property(name, value): - request.node.user_properties.append((name, value)) - - return append_property - - -@pytest.fixture -def record_xml_attribute(request): - """Add extra xml attributes to the tag for the calling test. - The fixture is callable with ``(name, value)``, with value being - automatically xml-encoded - """ - from _pytest.warning_types import PytestExperimentalApiWarning - - request.node.warn( - PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") - ) - - _warn_incompatibility_with_xunit2(request, "record_xml_attribute") - - # Declare noop - def add_attr_noop(name, value): - pass - - attr_func = add_attr_noop - - xml = getattr(request.config, "_xml", None) - if xml is not None: - node_reporter = xml.node_reporter(request.node.nodeid) - attr_func = node_reporter.add_attribute - - return attr_func - - -def _check_record_param_type(param, v): - """Used by record_testsuite_property to check that the given parameter name is of the proper - type""" - __tracebackhide__ = True - if not isinstance(v, six.string_types): - msg = "{param} parameter needs to be a string, but {g} given" - raise TypeError(msg.format(param=param, g=type(v).__name__)) - - -@pytest.fixture(scope="session") -def record_testsuite_property(request): - """ - Records a new ``<property>`` tag as child of the root ``<testsuite>``. This is suitable to - writing global information regarding the entire test suite, and is compatible with ``xunit2`` JUnit family. - - This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: - - .. code-block:: python - - def test_foo(record_testsuite_property): - record_testsuite_property("ARCH", "PPC") - record_testsuite_property("STORAGE_TYPE", "CEPH") - - ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped. 
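``record_xml_attribute`` above has no inline example; a minimal usage sketch, analogous to ``record_property``, with an illustrative attribute name and value::

    def test_function(record_xml_attribute):
        record_xml_attribute("assertions", "REQ-1234")
        assert True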
- """ - - __tracebackhide__ = True - - def record_func(name, value): - """noop function in case --junitxml was not passed in the command-line""" - __tracebackhide__ = True - _check_record_param_type("name", name) - - xml = getattr(request.config, "_xml", None) - if xml is not None: - record_func = xml.add_global_property # noqa - return record_func - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group.addoption( - "--junitxml", - "--junit-xml", - action="store", - dest="xmlpath", - metavar="path", - type=functools.partial(filename_arg, optname="--junitxml"), - default=None, - help="create junit-xml style report file at given path.", - ) - group.addoption( - "--junitprefix", - "--junit-prefix", - action="store", - metavar="str", - default=None, - help="prepend prefix to classnames in junit-xml output", - ) - parser.addini( - "junit_suite_name", "Test suite name for JUnit report", default="pytest" - ) - parser.addini( - "junit_logging", - "Write captured log messages to JUnit report: " - "one of no|system-out|system-err", - default="no", - ) # choices=['no', 'stdout', 'stderr']) - parser.addini( - "junit_log_passing_tests", - "Capture log information for passing tests to JUnit report: ", - type="bool", - default=True, - ) - parser.addini( - "junit_duration_report", - "Duration time to report: one of total|call", - default="total", - ) # choices=['total', 'call']) - parser.addini( - "junit_family", - "Emit XML for schema: one of legacy|xunit1|xunit2", - default="xunit1", - ) - - -def pytest_configure(config): - xmlpath = config.option.xmlpath - # prevent opening xmllog on slave nodes (xdist) - if xmlpath and not hasattr(config, "slaveinput"): - config._xml = LogXML( - xmlpath, - config.option.junitprefix, - config.getini("junit_suite_name"), - config.getini("junit_logging"), - config.getini("junit_duration_report"), - config.getini("junit_family"), - config.getini("junit_log_passing_tests"), - ) - config.pluginmanager.register(config._xml) - - -def pytest_unconfigure(config): - xml = getattr(config, "_xml", None) - if xml: - del config._xml - config.pluginmanager.unregister(xml) - - -def mangle_test_address(address): - path, possible_open_bracket, params = address.partition("[") - names = path.split("::") - try: - names.remove("()") - except ValueError: - pass - # convert file path to dotted path - names[0] = names[0].replace(nodes.SEP, ".") - names[0] = _py_ext_re.sub("", names[0]) - # put any params back - names[-1] += possible_open_bracket + params - return names - - -class LogXML(object): - def __init__( - self, - logfile, - prefix, - suite_name="pytest", - logging="no", - report_duration="total", - family="xunit1", - log_passing_tests=True, - ): - logfile = os.path.expanduser(os.path.expandvars(logfile)) - self.logfile = os.path.normpath(os.path.abspath(logfile)) - self.prefix = prefix - self.suite_name = suite_name - self.logging = logging - self.log_passing_tests = log_passing_tests - self.report_duration = report_duration - self.family = family - self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0) - self.node_reporters = {} # nodeid -> _NodeReporter - self.node_reporters_ordered = [] - self.global_properties = [] - - # List of reports that failed on call but teardown is pending. 
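To make the classname mangling above concrete, a hedged check assuming this module is importable as ``_pytest.junitxml`` (the node id is illustrative)::

    from _pytest.junitxml import mangle_test_address

    assert mangle_test_address("tests/test_api.py::TestLogin::test_ok[admin]") == [
        "tests.test_api",
        "TestLogin",
        "test_ok[admin]",
    ]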
- self.open_reports = [] - self.cnt_double_fail_tests = 0 - - # Replaces convenience family with real family - if self.family == "legacy": - self.family = "xunit1" - - def finalize(self, report): - nodeid = getattr(report, "nodeid", report) - # local hack to handle xdist report order - slavenode = getattr(report, "node", None) - reporter = self.node_reporters.pop((nodeid, slavenode)) - if reporter is not None: - reporter.finalize() - - def node_reporter(self, report): - nodeid = getattr(report, "nodeid", report) - # local hack to handle xdist report order - slavenode = getattr(report, "node", None) - - key = nodeid, slavenode - - if key in self.node_reporters: - # TODO: breasks for --dist=each - return self.node_reporters[key] - - reporter = _NodeReporter(nodeid, self) - - self.node_reporters[key] = reporter - self.node_reporters_ordered.append(reporter) - - return reporter - - def add_stats(self, key): - if key in self.stats: - self.stats[key] += 1 - - def _opentestcase(self, report): - reporter = self.node_reporter(report) - reporter.record_testreport(report) - return reporter - - def pytest_runtest_logreport(self, report): - """handle a setup/call/teardown report, generating the appropriate - xml tags as necessary. - - note: due to plugins like xdist, this hook may be called in interlaced - order with reports from other nodes. for example: - - usual call order: - -> setup node1 - -> call node1 - -> teardown node1 - -> setup node2 - -> call node2 - -> teardown node2 - - possible call order in xdist: - -> setup node1 - -> call node1 - -> setup node2 - -> call node2 - -> teardown node2 - -> teardown node1 - """ - close_report = None - if report.passed: - if report.when == "call": # ignore setup/teardown - reporter = self._opentestcase(report) - reporter.append_pass(report) - elif report.failed: - if report.when == "teardown": - # The following vars are needed when xdist plugin is used - report_wid = getattr(report, "worker_id", None) - report_ii = getattr(report, "item_index", None) - close_report = next( - ( - rep - for rep in self.open_reports - if ( - rep.nodeid == report.nodeid - and getattr(rep, "item_index", None) == report_ii - and getattr(rep, "worker_id", None) == report_wid - ) - ), - None, - ) - if close_report: - # We need to open new testcase in case we have failure in - # call and error in teardown in order to follow junit - # schema - self.finalize(close_report) - self.cnt_double_fail_tests += 1 - reporter = self._opentestcase(report) - if report.when == "call": - reporter.append_failure(report) - self.open_reports.append(report) - if not self.log_passing_tests: - reporter.write_captured_output(report) - else: - reporter.append_error(report) - elif report.skipped: - reporter = self._opentestcase(report) - reporter.append_skipped(report) - self.update_testcase_duration(report) - if report.when == "teardown": - reporter = self._opentestcase(report) - reporter.write_captured_output(report) - - for propname, propvalue in report.user_properties: - reporter.add_property(propname, propvalue) - - self.finalize(report) - report_wid = getattr(report, "worker_id", None) - report_ii = getattr(report, "item_index", None) - close_report = next( - ( - rep - for rep in self.open_reports - if ( - rep.nodeid == report.nodeid - and getattr(rep, "item_index", None) == report_ii - and getattr(rep, "worker_id", None) == report_wid - ) - ), - None, - ) - if close_report: - self.open_reports.remove(close_report) - - def update_testcase_duration(self, report): - """accumulates total duration for 
nodeid from given report and updates - the Junit.testcase with the new total if already created. - """ - if self.report_duration == "total" or report.when == self.report_duration: - reporter = self.node_reporter(report) - reporter.duration += getattr(report, "duration", 0.0) - - def pytest_collectreport(self, report): - if not report.passed: - reporter = self._opentestcase(report) - if report.failed: - reporter.append_collect_error(report) - else: - reporter.append_collect_skipped(report) - - def pytest_internalerror(self, excrepr): - reporter = self.node_reporter("internal") - reporter.attrs.update(classname="pytest", name="internal") - reporter._add_simple(Junit.error, "internal error", excrepr) - - def pytest_sessionstart(self): - self.suite_start_time = time.time() - - def pytest_sessionfinish(self): - dirname = os.path.dirname(os.path.abspath(self.logfile)) - if not os.path.isdir(dirname): - os.makedirs(dirname) - logfile = open(self.logfile, "w", encoding="utf-8") - suite_stop_time = time.time() - suite_time_delta = suite_stop_time - self.suite_start_time - - numtests = ( - self.stats["passed"] - + self.stats["failure"] - + self.stats["skipped"] - + self.stats["error"] - - self.cnt_double_fail_tests - ) - logfile.write('<?xml version="1.0" encoding="utf-8"?>') - - suite_node = Junit.testsuite( - self._get_global_properties_node(), - [x.to_xml() for x in self.node_reporters_ordered], - name=self.suite_name, - errors=self.stats["error"], - failures=self.stats["failure"], - skipped=self.stats["skipped"], - tests=numtests, - time="%.3f" % suite_time_delta, - timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(), - hostname=platform.node(), - ) - logfile.write(Junit.testsuites([suite_node]).unicode(indent=0)) - logfile.close() - - def pytest_terminal_summary(self, terminalreporter): - terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) - - def add_global_property(self, name, value): - __tracebackhide__ = True - _check_record_param_type("name", name) - self.global_properties.append((name, bin_xml_escape(value))) - - def _get_global_properties_node(self): - """Return a Junit node containing custom properties, if any. - """ - if self.global_properties: - return Junit.properties( - [ - Junit.property(name=name, value=value) - for name, value in self.global_properties - ] - ) - return "" diff --git a/contrib/python/pytest/py2/_pytest/logging.py b/contrib/python/pytest/py2/_pytest/logging.py deleted file mode 100644 index 2400737ee4..0000000000 --- a/contrib/python/pytest/py2/_pytest/logging.py +++ /dev/null @@ -1,708 +0,0 @@ -# -*- coding: utf-8 -*- -""" Access and control log capturing. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import logging -import re -from contextlib import contextmanager - -import py -import six - -import pytest -from _pytest.compat import dummy_context_manager -from _pytest.config import create_terminal_writer -from _pytest.pathlib import Path - -DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" -DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" -_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") - - -def _remove_ansi_escape_sequences(text): - return _ANSI_ESCAPE_SEQ.sub("", text) - - -class ColoredLevelFormatter(logging.Formatter): - """ - Colorize the %(levelname)..s part of the log format passed to __init__. 
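A standalone illustration of the ANSI-stripping helper defined at the top of this module; the colored string is made up::

    import re

    ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
    colored = "\x1b[31mERROR\x1b[0m disk failure"
    assert ANSI_ESCAPE_SEQ.sub("", colored) == "ERROR disk failure"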
- """ - - LOGLEVEL_COLOROPTS = { - logging.CRITICAL: {"red"}, - logging.ERROR: {"red", "bold"}, - logging.WARNING: {"yellow"}, - logging.WARN: {"yellow"}, - logging.INFO: {"green"}, - logging.DEBUG: {"purple"}, - logging.NOTSET: set(), - } - LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)") - - def __init__(self, terminalwriter, *args, **kwargs): - super(ColoredLevelFormatter, self).__init__(*args, **kwargs) - if six.PY2: - self._original_fmt = self._fmt - else: - self._original_fmt = self._style._fmt - self._level_to_fmt_mapping = {} - - levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) - if not levelname_fmt_match: - return - levelname_fmt = levelname_fmt_match.group() - - for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): - formatted_levelname = levelname_fmt % { - "levelname": logging.getLevelName(level) - } - - # add ANSI escape sequences around the formatted levelname - color_kwargs = {name: True for name in color_opts} - colorized_formatted_levelname = terminalwriter.markup( - formatted_levelname, **color_kwargs - ) - self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( - colorized_formatted_levelname, self._fmt - ) - - def format(self, record): - fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) - if six.PY2: - self._fmt = fmt - else: - self._style._fmt = fmt - return super(ColoredLevelFormatter, self).format(record) - - -if not six.PY2: - # Formatter classes don't support format styles in PY2 - - class PercentStyleMultiline(logging.PercentStyle): - """A logging style with special support for multiline messages. - - If the message of a record consists of multiple lines, this style - formats the message as if each line were logged separately. - """ - - @staticmethod - def _update_message(record_dict, message): - tmp = record_dict.copy() - tmp["message"] = message - return tmp - - def format(self, record): - if "\n" in record.message: - lines = record.message.splitlines() - formatted = self._fmt % self._update_message(record.__dict__, lines[0]) - # TODO optimize this by introducing an option that tells the - # logging framework that the indentation doesn't - # change. This allows to compute the indentation only once. 
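A quick check of what ``LEVELNAME_FMT_REGEX`` above extracts from a log format string (the format used here is this module's default)::

    import re

    LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)")
    match = LEVELNAME_FMT_REGEX.search(
        "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
    )
    assert match.group() == "%(levelname)-8s"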
- indentation = _remove_ansi_escape_sequences(formatted).find(lines[0]) - lines[0] = formatted - return ("\n" + " " * indentation).join(lines) - else: - return self._fmt % record.__dict__ - - -def get_option_ini(config, *names): - for name in names: - ret = config.getoption(name) # 'default' arg won't work as expected - if ret is None: - ret = config.getini(name) - if ret: - return ret - - -def pytest_addoption(parser): - """Add options to control log capturing.""" - group = parser.getgroup("logging") - - def add_option_ini(option, dest, default=None, type=None, **kwargs): - parser.addini( - dest, default=default, type=type, help="default value for " + option - ) - group.addoption(option, dest=dest, **kwargs) - - add_option_ini( - "--no-print-logs", - dest="log_print", - action="store_const", - const=False, - default=True, - type="bool", - help="disable printing caught logs on failed tests.", - ) - add_option_ini( - "--log-level", - dest="log_level", - default=None, - help="logging level used by the logging module", - ) - add_option_ini( - "--log-format", - dest="log_format", - default=DEFAULT_LOG_FORMAT, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-date-format", - dest="log_date_format", - default=DEFAULT_LOG_DATE_FORMAT, - help="log date format as used by the logging module.", - ) - parser.addini( - "log_cli", - default=False, - type="bool", - help='enable log display during test run (also known as "live logging").', - ) - add_option_ini( - "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." - ) - add_option_ini( - "--log-cli-format", - dest="log_cli_format", - default=None, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-cli-date-format", - dest="log_cli_date_format", - default=None, - help="log date format as used by the logging module.", - ) - add_option_ini( - "--log-file", - dest="log_file", - default=None, - help="path to a file when logging will be written to.", - ) - add_option_ini( - "--log-file-level", - dest="log_file_level", - default=None, - help="log file logging level.", - ) - add_option_ini( - "--log-file-format", - dest="log_file_format", - default=DEFAULT_LOG_FORMAT, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-file-date-format", - dest="log_file_date_format", - default=DEFAULT_LOG_DATE_FORMAT, - help="log date format as used by the logging module.", - ) - - -@contextmanager -def catching_logs(handler, formatter=None, level=None): - """Context manager that prepares the whole logging machinery properly.""" - root_logger = logging.getLogger() - - if formatter is not None: - handler.setFormatter(formatter) - if level is not None: - handler.setLevel(level) - - # Adding the same handler twice would confuse logging system. - # Just don't do that. 
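A usage sketch for ``catching_logs`` together with ``LogCaptureHandler`` (defined just below), assuming this module is importable as ``_pytest.logging``::

    import logging

    from _pytest.logging import LogCaptureHandler, catching_logs

    with catching_logs(LogCaptureHandler(), level=logging.INFO) as handler:
        logging.getLogger("demo").info("hello from the demo logger")

    # The handler keeps its records after the context manager exits.
    assert handler.records[0].getMessage() == "hello from the demo logger"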
- add_new_handler = handler not in root_logger.handlers - - if add_new_handler: - root_logger.addHandler(handler) - if level is not None: - orig_level = root_logger.level - root_logger.setLevel(min(orig_level, level)) - try: - yield handler - finally: - if level is not None: - root_logger.setLevel(orig_level) - if add_new_handler: - root_logger.removeHandler(handler) - - -class LogCaptureHandler(logging.StreamHandler): - """A logging handler that stores log records and the log text.""" - - def __init__(self): - """Creates a new log handler.""" - logging.StreamHandler.__init__(self, py.io.TextIO()) - self.records = [] - - def emit(self, record): - """Keep the log records in a list in addition to the log text.""" - self.records.append(record) - logging.StreamHandler.emit(self, record) - - def reset(self): - self.records = [] - self.stream = py.io.TextIO() - - -class LogCaptureFixture(object): - """Provides access and control of log capturing.""" - - def __init__(self, item): - """Creates a new funcarg.""" - self._item = item - # dict of log name -> log level - self._initial_log_levels = {} # Dict[str, int] - - def _finalize(self): - """Finalizes the fixture. - - This restores the log levels changed by :meth:`set_level`. - """ - # restore log levels - for logger_name, level in self._initial_log_levels.items(): - logger = logging.getLogger(logger_name) - logger.setLevel(level) - - @property - def handler(self): - """ - :rtype: LogCaptureHandler - """ - return self._item.catch_log_handler - - def get_records(self, when): - """ - Get the logging records for one of the possible test phases. - - :param str when: - Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". - - :rtype: List[logging.LogRecord] - :return: the list of captured records at the given stage - - .. versionadded:: 3.4 - """ - handler = self._item.catch_log_handlers.get(when) - if handler: - return handler.records - else: - return [] - - @property - def text(self): - """Returns the formatted log text.""" - return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) - - @property - def records(self): - """Returns the list of log records.""" - return self.handler.records - - @property - def record_tuples(self): - """Returns a list of a stripped down version of log records intended - for use in assertion comparison. - - The format of the tuple is: - - (logger_name, log_level, message) - """ - return [(r.name, r.levelno, r.getMessage()) for r in self.records] - - @property - def messages(self): - """Returns a list of format-interpolated log messages. - - Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list - are all interpolated. - Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with - levels, timestamps, etc, making exact comparisons more reliable. - - Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments - to the logging functions) is not included, as this is added by the formatter in the handler. - - .. versionadded:: 3.7 - """ - return [r.getMessage() for r in self.records] - - def clear(self): - """Reset the list of log records and the captured log text.""" - self.handler.reset() - - def set_level(self, level, logger=None): - """Sets the level for capturing of logs. The level will be restored to its previous value at the end of - the test. - - :param int level: the logger to level. 
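Typical ``caplog`` usage in a test, exercising ``set_level`` and ``record_tuples``; the logger name and message are illustrative::

    import logging

    def test_warning_is_logged(caplog):
        caplog.set_level(logging.INFO)
        logging.getLogger("app").warning("disk almost full")
        assert ("app", logging.WARNING, "disk almost full") in caplog.record_tuples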
- :param str logger: the logger to update the level. If not given, the root logger level is updated. - - .. versionchanged:: 3.4 - The levels of the loggers changed by this function will be restored to their initial values at the - end of the test. - """ - logger_name = logger - logger = logging.getLogger(logger_name) - # save the original log-level to restore it during teardown - self._initial_log_levels.setdefault(logger_name, logger.level) - logger.setLevel(level) - - @contextmanager - def at_level(self, level, logger=None): - """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the - level is restored to its original value. - - :param int level: the logger to level. - :param str logger: the logger to update the level. If not given, the root logger level is updated. - """ - logger = logging.getLogger(logger) - orig_level = logger.level - logger.setLevel(level) - try: - yield - finally: - logger.setLevel(orig_level) - - -@pytest.fixture -def caplog(request): - """Access and control log capturing. - - Captured logs are available through the following properties/methods:: - - * caplog.text -> string containing formatted log output - * caplog.records -> list of logging.LogRecord instances - * caplog.record_tuples -> list of (logger_name, level, message) tuples - * caplog.clear() -> clear captured records and formatted log output string - """ - result = LogCaptureFixture(request.node) - yield result - result._finalize() - - -def get_actual_log_level(config, *setting_names): - """Return the actual logging level.""" - - for setting_name in setting_names: - log_level = config.getoption(setting_name) - if log_level is None: - log_level = config.getini(setting_name) - if log_level: - break - else: - return - - if isinstance(log_level, six.string_types): - log_level = log_level.upper() - try: - return int(getattr(logging, log_level, log_level)) - except ValueError: - # Python logging does not recognise this as a logging level - raise pytest.UsageError( - "'{}' is not recognized as a logging level name for " - "'{}'. Please consider passing the " - "logging level num instead.".format(log_level, setting_name) - ) - - -# run after terminalreporter/capturemanager are configured -@pytest.hookimpl(trylast=True) -def pytest_configure(config): - config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") - - -class LoggingPlugin(object): - """Attaches to the logging module and captures log messages for each test. - """ - - def __init__(self, config): - """Creates a new plugin to capture log messages. - - The formatter can be safely shared across all handlers so - create a single one for the entire test session here. 
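``at_level`` is the context-manager counterpart of ``set_level``; a sketch with an illustrative logger name::

    import logging

    def test_quiet_section(caplog):
        with caplog.at_level(logging.ERROR, logger="noisy.lib"):
            # INFO is below ERROR, so the "noisy.lib" logger drops this record.
            logging.getLogger("noisy.lib").info("suppressed inside the block")
        # On exit the previous level of "noisy.lib" is restored.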
- """ - self._config = config - - self.print_logs = get_option_ini(config, "log_print") - self.formatter = self._create_formatter( - get_option_ini(config, "log_format"), - get_option_ini(config, "log_date_format"), - ) - self.log_level = get_actual_log_level(config, "log_level") - - self.log_file_level = get_actual_log_level(config, "log_file_level") - self.log_file_format = get_option_ini(config, "log_file_format", "log_format") - self.log_file_date_format = get_option_ini( - config, "log_file_date_format", "log_date_format" - ) - self.log_file_formatter = logging.Formatter( - self.log_file_format, datefmt=self.log_file_date_format - ) - - log_file = get_option_ini(config, "log_file") - if log_file: - self.log_file_handler = logging.FileHandler( - log_file, mode="w", encoding="UTF-8" - ) - self.log_file_handler.setFormatter(self.log_file_formatter) - else: - self.log_file_handler = None - - self.log_cli_handler = None - - self.live_logs_context = lambda: dummy_context_manager() - # Note that the lambda for the live_logs_context is needed because - # live_logs_context can otherwise not be entered multiple times due - # to limitations of contextlib.contextmanager. - - if self._log_cli_enabled(): - self._setup_cli_logging() - - def _create_formatter(self, log_format, log_date_format): - # color option doesn't exist if terminal plugin is disabled - color = getattr(self._config.option, "color", "no") - if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( - log_format - ): - formatter = ColoredLevelFormatter( - create_terminal_writer(self._config), log_format, log_date_format - ) - else: - formatter = logging.Formatter(log_format, log_date_format) - - if not six.PY2: - formatter._style = PercentStyleMultiline(formatter._style._fmt) - return formatter - - def _setup_cli_logging(self): - config = self._config - terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") - if terminal_reporter is None: - # terminal reporter is disabled e.g. by pytest-xdist. - return - - capture_manager = config.pluginmanager.get_plugin("capturemanager") - # if capturemanager plugin is disabled, live logging still works. - log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) - - log_cli_formatter = self._create_formatter( - get_option_ini(config, "log_cli_format", "log_format"), - get_option_ini(config, "log_cli_date_format", "log_date_format"), - ) - - log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level") - self.log_cli_handler = log_cli_handler - self.live_logs_context = lambda: catching_logs( - log_cli_handler, formatter=log_cli_formatter, level=log_cli_level - ) - - def set_log_path(self, fname): - """Public method, which can set filename parameter for - Logging.FileHandler(). Also creates parent directory if - it does not exist. - - .. warning:: - Please considered as an experimental API. - """ - fname = Path(fname) - - if not fname.is_absolute(): - fname = Path(self._config.rootdir, fname) - - if not fname.parent.exists(): - fname.parent.mkdir(exist_ok=True, parents=True) - - self.log_file_handler = logging.FileHandler( - str(fname), mode="w", encoding="UTF-8" - ) - self.log_file_handler.setFormatter(self.log_file_formatter) - - def _log_cli_enabled(self): - """Return True if log_cli should be considered enabled, either explicitly - or because --log-cli-level was given in the command-line. 
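The experimental ``set_log_path`` above can be driven from a ``conftest.py``; a hedged sketch in which the log path is illustrative and the ``None`` guard covers the case where the logging plugin has not been registered yet::

    import pytest

    @pytest.hookimpl(trylast=True)
    def pytest_configure(config):
        logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
        if logging_plugin is not None:
            logging_plugin.set_log_path("logs/run.log")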
- """ - return self._config.getoption( - "--log-cli-level" - ) is not None or self._config.getini("log_cli") - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_collection(self): - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("collection") - - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @contextmanager - def _runtest_for(self, item, when): - with self._runtest_for_main(item, when): - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @contextmanager - def _runtest_for_main(self, item, when): - """Implements the internals of pytest_runtest_xxx() hook.""" - with catching_logs( - LogCaptureHandler(), formatter=self.formatter, level=self.log_level - ) as log_handler: - if self.log_cli_handler: - self.log_cli_handler.set_when(when) - - if item is None: - yield # run the test - return - - if not hasattr(item, "catch_log_handlers"): - item.catch_log_handlers = {} - item.catch_log_handlers[when] = log_handler - item.catch_log_handler = log_handler - try: - yield # run test - finally: - if when == "teardown": - del item.catch_log_handler - del item.catch_log_handlers - - if self.print_logs: - # Add a captured log section to the report. - log = log_handler.stream.getvalue().strip() - item.add_report_section(when, "log", log) - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_setup(self, item): - with self._runtest_for(item, "setup"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_call(self, item): - with self._runtest_for(item, "call"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_teardown(self, item): - with self._runtest_for(item, "teardown"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_logstart(self): - if self.log_cli_handler: - self.log_cli_handler.reset() - with self._runtest_for(None, "start"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_logfinish(self): - with self._runtest_for(None, "finish"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_logreport(self): - with self._runtest_for(None, "logreport"): - yield - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_sessionfinish(self): - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("sessionfinish") - if self.log_file_handler is not None: - try: - with catching_logs( - self.log_file_handler, level=self.log_file_level - ): - yield - finally: - # Close the FileHandler explicitly. - # (logging.shutdown might have lost the weakref?!) 
- self.log_file_handler.close() - else: - yield - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_sessionstart(self): - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("sessionstart") - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtestloop(self, session): - """Runs all collected test items.""" - - if session.config.option.collectonly: - yield - return - - if self._log_cli_enabled() and self._config.getoption("verbose") < 1: - # setting verbose flag is needed to avoid messy test progress output - self._config.option.verbose = 1 - - with self.live_logs_context(): - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield # run all the tests - else: - yield # run all the tests - - -class _LiveLoggingStreamHandler(logging.StreamHandler): - """ - Custom StreamHandler used by the live logging feature: it will write a newline before the first log message - in each test. - - During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured - and won't appear in the terminal. - """ - - def __init__(self, terminal_reporter, capture_manager): - """ - :param _pytest.terminal.TerminalReporter terminal_reporter: - :param _pytest.capture.CaptureManager capture_manager: - """ - logging.StreamHandler.__init__(self, stream=terminal_reporter) - self.capture_manager = capture_manager - self.reset() - self.set_when(None) - self._test_outcome_written = False - - def reset(self): - """Reset the handler; should be called before the start of each test""" - self._first_record_emitted = False - - def set_when(self, when): - """Prepares for the given test phase (setup/call/teardown)""" - self._when = when - self._section_name_shown = False - if when == "start": - self._test_outcome_written = False - - def emit(self, record): - ctx_manager = ( - self.capture_manager.global_and_fixture_disabled() - if self.capture_manager - else dummy_context_manager() - ) - with ctx_manager: - if not self._first_record_emitted: - self.stream.write("\n") - self._first_record_emitted = True - elif self._when in ("teardown", "finish"): - if not self._test_outcome_written: - self._test_outcome_written = True - self.stream.write("\n") - if not self._section_name_shown and self._when: - self.stream.section("live log " + self._when, sep="-", bold=True) - self._section_name_shown = True - logging.StreamHandler.emit(self, record) diff --git a/contrib/python/pytest/py2/_pytest/main.py b/contrib/python/pytest/py2/_pytest/main.py deleted file mode 100644 index a9d310cb62..0000000000 --- a/contrib/python/pytest/py2/_pytest/main.py +++ /dev/null @@ -1,780 +0,0 @@ -# -*- coding: utf-8 -*- -""" core implementation of testing process: init, session, runtest loop. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import contextlib -import fnmatch -import functools -import os -import pkgutil -import sys -import warnings - -import attr -import py -import six - -import _pytest._code -from _pytest import nodes -from _pytest.config import directory_arg -from _pytest.config import hookimpl -from _pytest.config import UsageError -from _pytest.deprecated import PYTEST_CONFIG_GLOBAL -from _pytest.outcomes import exit -from _pytest.runner import collect_one_node - -# exitcodes for the command line -EXIT_OK = 0 -EXIT_TESTSFAILED = 1 -EXIT_INTERRUPTED = 2 -EXIT_INTERNALERROR = 3 -EXIT_USAGEERROR = 4 -EXIT_NOTESTSCOLLECTED = 5 - - -def pytest_addoption(parser): - parser.addini( - "norecursedirs", - "directory patterns to avoid for recursion", - type="args", - default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"], - ) - parser.addini( - "testpaths", - "directories to search for tests when no files or directories are given in the " - "command line.", - type="args", - default=[], - ) - group = parser.getgroup("general", "running and selection options") - group._addoption( - "-x", - "--exitfirst", - action="store_const", - dest="maxfail", - const=1, - help="exit instantly on first error or failed test.", - ), - group._addoption( - "--maxfail", - metavar="num", - action="store", - type=int, - dest="maxfail", - default=0, - help="exit after first num failures or errors.", - ) - group._addoption( - "--strict-markers", - "--strict", - action="store_true", - help="markers not registered in the `markers` section of the configuration file raise errors.", - ) - group._addoption( - "-c", - metavar="file", - type=str, - dest="inifilename", - help="load configuration from `file` instead of trying to locate one of the implicit " - "configuration files.", - ) - group._addoption( - "--continue-on-collection-errors", - action="store_true", - default=False, - dest="continue_on_collection_errors", - help="Force test execution even if collection errors occur.", - ) - group._addoption( - "--rootdir", - action="store", - dest="rootdir", - help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " - "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " - "'$HOME/root_dir'.", - ) - - group = parser.getgroup("collect", "collection") - group.addoption( - "--collectonly", - "--collect-only", - action="store_true", - help="only collect tests, don't execute them.", - ), - group.addoption( - "--pyargs", - action="store_true", - help="try to interpret all arguments as python packages.", - ) - group.addoption( - "--ignore", - action="append", - metavar="path", - help="ignore path during collection (multi-allowed).", - ) - group.addoption( - "--ignore-glob", - action="append", - metavar="path", - help="ignore path pattern during collection (multi-allowed).", - ) - group.addoption( - "--deselect", - action="append", - metavar="nodeid_prefix", - help="deselect item during collection (multi-allowed).", - ) - # when changing this to --conf-cut-dir, config.py Conftest.setinitial - # needs upgrading as well - group.addoption( - "--confcutdir", - dest="confcutdir", - default=None, - metavar="dir", - type=functools.partial(directory_arg, optname="--confcutdir"), - help="only load conftest.py's relative to specified dir.", - ) - group.addoption( - "--noconftest", - action="store_true", - dest="noconftest", - default=False, - help="Don't load any conftest.py files.", - ) - group.addoption( - "--keepduplicates", - "--keep-duplicates", - action="store_true", - dest="keepduplicates", - default=False, - help="Keep duplicate tests.", - ) - group.addoption( - "--collect-in-virtualenv", - action="store_true", - dest="collect_in_virtualenv", - default=False, - help="Don't ignore tests in a local virtualenv directory", - ) - - group = parser.getgroup("debugconfig", "test session debugging and configuration") - group.addoption( - "--basetemp", - dest="basetemp", - default=None, - metavar="dir", - help=( - "base temporary directory for this test run." 
- "(warning: this directory is removed if it exists)" - ), - ) - - -class _ConfigDeprecated(object): - def __init__(self, config): - self.__dict__["_config"] = config - - def __getattr__(self, attr): - warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) - return getattr(self._config, attr) - - def __setattr__(self, attr, val): - warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) - return setattr(self._config, attr, val) - - def __repr__(self): - return "{}({!r})".format(type(self).__name__, self._config) - - -def pytest_configure(config): - __import__("pytest").config = _ConfigDeprecated(config) # compatibility - - -def wrap_session(config, doit): - """Skeleton command line program""" - session = Session(config) - session.exitstatus = EXIT_OK - initstate = 0 - try: - try: - config._do_configure() - initstate = 1 - config.hook.pytest_sessionstart(session=session) - initstate = 2 - session.exitstatus = doit(config, session) or 0 - except UsageError: - session.exitstatus = EXIT_USAGEERROR - raise - except Failed: - session.exitstatus = EXIT_TESTSFAILED - except (KeyboardInterrupt, exit.Exception): - excinfo = _pytest._code.ExceptionInfo.from_current() - exitstatus = EXIT_INTERRUPTED - if isinstance(excinfo.value, exit.Exception): - if excinfo.value.returncode is not None: - exitstatus = excinfo.value.returncode - if initstate < 2: - sys.stderr.write( - "{}: {}\n".format(excinfo.typename, excinfo.value.msg) - ) - config.hook.pytest_keyboard_interrupt(excinfo=excinfo) - session.exitstatus = exitstatus - except: # noqa - excinfo = _pytest._code.ExceptionInfo.from_current() - config.notify_exception(excinfo, config.option) - session.exitstatus = EXIT_INTERNALERROR - if excinfo.errisinstance(SystemExit): - sys.stderr.write("mainloop: caught unexpected SystemExit!\n") - - finally: - excinfo = None # Explicitly break reference cycle. - session.startdir.chdir() - if initstate >= 2: - config.hook.pytest_sessionfinish( - session=session, exitstatus=session.exitstatus - ) - config._ensure_unconfigure() - return session.exitstatus - - -def pytest_cmdline_main(config): - return wrap_session(config, _main) - - -def _main(config, session): - """ default command line protocol for initialization, session, - running tests and reporting. 
""" - config.hook.pytest_collection(session=session) - config.hook.pytest_runtestloop(session=session) - - if session.testsfailed: - return EXIT_TESTSFAILED - elif session.testscollected == 0: - return EXIT_NOTESTSCOLLECTED - - -def pytest_collection(session): - return session.perform_collect() - - -def pytest_runtestloop(session): - if session.testsfailed and not session.config.option.continue_on_collection_errors: - raise session.Interrupted("%d errors during collection" % session.testsfailed) - - if session.config.option.collectonly: - return True - - for i, item in enumerate(session.items): - nextitem = session.items[i + 1] if i + 1 < len(session.items) else None - item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) - if session.shouldfail: - raise session.Failed(session.shouldfail) - if session.shouldstop: - raise session.Interrupted(session.shouldstop) - return True - - -def _in_venv(path): - """Attempts to detect if ``path`` is the root of a Virtual Environment by - checking for the existence of the appropriate activate script""" - bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") - if not bindir.isdir(): - return False - activates = ( - "activate", - "activate.csh", - "activate.fish", - "Activate", - "Activate.bat", - "Activate.ps1", - ) - return any([fname.basename in activates for fname in bindir.listdir()]) - - -def pytest_ignore_collect(path, config): - ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) - ignore_paths = ignore_paths or [] - excludeopt = config.getoption("ignore") - if excludeopt: - ignore_paths.extend([py.path.local(x) for x in excludeopt]) - - if py.path.local(path) in ignore_paths: - return True - - ignore_globs = config._getconftest_pathlist( - "collect_ignore_glob", path=path.dirpath() - ) - ignore_globs = ignore_globs or [] - excludeglobopt = config.getoption("ignore_glob") - if excludeglobopt: - ignore_globs.extend([py.path.local(x) for x in excludeglobopt]) - - if any( - fnmatch.fnmatch(six.text_type(path), six.text_type(glob)) - for glob in ignore_globs - ): - return True - - allow_in_venv = config.getoption("collect_in_virtualenv") - if not allow_in_venv and _in_venv(path): - return True - - return False - - -def pytest_collection_modifyitems(items, config): - deselect_prefixes = tuple(config.getoption("deselect") or []) - if not deselect_prefixes: - return - - remaining = [] - deselected = [] - for colitem in items: - if colitem.nodeid.startswith(deselect_prefixes): - deselected.append(colitem) - else: - remaining.append(colitem) - - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining - - -@contextlib.contextmanager -def _patched_find_module(): - """Patch bug in pkgutil.ImpImporter.find_module - - When using pkgutil.find_loader on python<3.4 it removes symlinks - from the path due to a call to os.path.realpath. This is not consistent - with actually doing the import (in these versions, pkgutil and __import__ - did not share the same underlying code). This can break conftest - discovery for pytest where symlinks are involved. - - The only supported python<3.4 by pytest is python 2.7. 
- """ - if six.PY2: # python 3.4+ uses importlib instead - - def find_module_patched(self, fullname, path=None): - # Note: we ignore 'path' argument since it is only used via meta_path - subname = fullname.split(".")[-1] - if subname != fullname and self.path is None: - return None - if self.path is None: - path = None - else: - # original: path = [os.path.realpath(self.path)] - path = [self.path] - try: - file, filename, etc = pkgutil.imp.find_module(subname, path) - except ImportError: - return None - return pkgutil.ImpLoader(fullname, file, filename, etc) - - old_find_module = pkgutil.ImpImporter.find_module - pkgutil.ImpImporter.find_module = find_module_patched - try: - yield - finally: - pkgutil.ImpImporter.find_module = old_find_module - else: - yield - - -class FSHookProxy(object): - def __init__(self, fspath, pm, remove_mods): - self.fspath = fspath - self.pm = pm - self.remove_mods = remove_mods - - def __getattr__(self, name): - x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) - self.__dict__[name] = x - return x - - -class NoMatch(Exception): - """ raised if matching cannot locate a matching names. """ - - -class Interrupted(KeyboardInterrupt): - """ signals an interrupted test run. """ - - __module__ = "builtins" # for py3 - - -class Failed(Exception): - """ signals a stop as failed test run. """ - - -@attr.s -class _bestrelpath_cache(dict): - path = attr.ib() - - def __missing__(self, path): - r = self.path.bestrelpath(path) - self[path] = r - return r - - -class Session(nodes.FSCollector): - Interrupted = Interrupted - Failed = Failed - - def __init__(self, config): - nodes.FSCollector.__init__( - self, config.rootdir, parent=None, config=config, session=self, nodeid="" - ) - self.testsfailed = 0 - self.testscollected = 0 - self.shouldstop = False - self.shouldfail = False - self.trace = config.trace.root.get("collection") - self._norecursepatterns = config.getini("norecursedirs") - self.startdir = config.invocation_dir - self._initialpaths = frozenset() - # Keep track of any collected nodes in here, so we don't duplicate fixtures - self._node_cache = {} - self._bestrelpathcache = _bestrelpath_cache(config.rootdir) - # Dirnames of pkgs with dunder-init files. 
- self._pkg_roots = {} - - self.config.pluginmanager.register(self, name="session") - - def __repr__(self): - return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( - self.__class__.__name__, - self.name, - getattr(self, "exitstatus", "<UNSET>"), - self.testsfailed, - self.testscollected, - ) - - def _node_location_to_relpath(self, node_path): - # bestrelpath is a quite slow function - return self._bestrelpathcache[node_path] - - @hookimpl(tryfirst=True) - def pytest_collectstart(self): - if self.shouldfail: - raise self.Failed(self.shouldfail) - if self.shouldstop: - raise self.Interrupted(self.shouldstop) - - @hookimpl(tryfirst=True) - def pytest_runtest_logreport(self, report): - if report.failed and not hasattr(report, "wasxfail"): - self.testsfailed += 1 - maxfail = self.config.getvalue("maxfail") - if maxfail and self.testsfailed >= maxfail: - self.shouldfail = "stopping after %d failures" % (self.testsfailed) - - pytest_collectreport = pytest_runtest_logreport - - def isinitpath(self, path): - return path in self._initialpaths - - def gethookproxy(self, fspath): - # check if we have the common case of running - # hooks with all conftest.py files - pm = self.config.pluginmanager - my_conftestmodules = pm._getconftestmodules(fspath) - remove_mods = pm._conftest_plugins.difference(my_conftestmodules) - if remove_mods: - # one or more conftests are not in use at this fspath - proxy = FSHookProxy(fspath, pm, remove_mods) - else: - # all plugis are active for this fspath - proxy = self.config.hook - return proxy - - def perform_collect(self, args=None, genitems=True): - hook = self.config.hook - try: - items = self._perform_collect(args, genitems) - self.config.pluginmanager.check_pending() - hook.pytest_collection_modifyitems( - session=self, config=self.config, items=items - ) - finally: - hook.pytest_collection_finish(session=self) - self.testscollected = len(items) - return items - - def _perform_collect(self, args, genitems): - if args is None: - args = self.config.args - self.trace("perform_collect", self, args) - self.trace.root.indent += 1 - self._notfound = [] - initialpaths = [] - self._initialparts = [] - self.items = items = [] - for arg in args: - parts = self._parsearg(arg) - self._initialparts.append(parts) - initialpaths.append(parts[0]) - self._initialpaths = frozenset(initialpaths) - rep = collect_one_node(self) - self.ihook.pytest_collectreport(report=rep) - self.trace.root.indent -= 1 - if self._notfound: - errors = [] - for arg, exc in self._notfound: - line = "(no name %r in any of %r)" % (arg, exc.args[0]) - errors.append("not found: %s\n%s" % (arg, line)) - # XXX: test this - raise UsageError(*errors) - if not genitems: - return rep.result - else: - if rep.passed: - for node in rep.result: - self.items.extend(self.genitems(node)) - return items - - def collect(self): - for initialpart in self._initialparts: - arg = "::".join(map(str, initialpart)) - self.trace("processing argument", arg) - self.trace.root.indent += 1 - try: - for x in self._collect(arg): - yield x - except NoMatch: - # we are inside a make_report hook so - # we cannot directly pass through the exception - self._notfound.append((arg, sys.exc_info()[1])) - - self.trace.root.indent -= 1 - - def _collect(self, arg): - from _pytest.python import Package - - names = self._parsearg(arg) - argpath = names.pop(0) - - # Start with a Session root, and delve to argpath item (dir or file) - # and stack all Packages found on the way. 
- # No point in finding packages when collecting doctests - if not self.config.getoption("doctestmodules", False): - pm = self.config.pluginmanager - for parent in reversed(argpath.parts()): - if pm._confcutdir and pm._confcutdir.relto(parent): - break - - if parent.isdir(): - pkginit = parent.join("__init__.py") - if pkginit.isfile(): - if pkginit not in self._node_cache: - col = self._collectfile(pkginit, handle_dupes=False) - if col: - if isinstance(col[0], Package): - self._pkg_roots[parent] = col[0] - # always store a list in the cache, matchnodes expects it - self._node_cache[col[0].fspath] = [col[0]] - - # If it's a directory argument, recurse and look for any Subpackages. - # Let the Package collector deal with subnodes, don't collect here. - if argpath.check(dir=1): - assert not names, "invalid arg %r" % (arg,) - - seen_dirs = set() - for path in argpath.visit( - fil=self._visit_filter, rec=self._recurse, bf=True, sort=True - ): - dirpath = path.dirpath() - if dirpath not in seen_dirs: - # Collect packages first. - seen_dirs.add(dirpath) - pkginit = dirpath.join("__init__.py") - if pkginit.exists(): - for x in self._collectfile(pkginit): - yield x - if isinstance(x, Package): - self._pkg_roots[dirpath] = x - if dirpath in self._pkg_roots: - # Do not collect packages here. - continue - - for x in self._collectfile(path): - key = (type(x), x.fspath) - if key in self._node_cache: - yield self._node_cache[key] - else: - self._node_cache[key] = x - yield x - else: - assert argpath.check(file=1) - - if argpath in self._node_cache: - col = self._node_cache[argpath] - else: - collect_root = self._pkg_roots.get(argpath.dirname, self) - col = collect_root._collectfile(argpath, handle_dupes=False) - if col: - self._node_cache[argpath] = col - m = self.matchnodes(col, names) - # If __init__.py was the only file requested, then the matched node will be - # the corresponding Package, and the first yielded item will be the __init__ - # Module itself, so just use that. If this special case isn't taken, then all - # the files in the package will be yielded. - if argpath.basename == "__init__.py": - try: - yield next(m[0].collect()) - except StopIteration: - # The package collects nothing with only an __init__.py - # file in it, which gets ignored by the default - # "python_files" option. 
- pass - return - for y in m: - yield y - - def _collectfile(self, path, handle_dupes=True): - assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % ( - path, - path.isdir(), - path.exists(), - path.islink(), - ) - ihook = self.gethookproxy(path) - if not self.isinitpath(path): - if ihook.pytest_ignore_collect(path=path, config=self.config): - return () - - if handle_dupes: - keepduplicates = self.config.getoption("keepduplicates") - if not keepduplicates: - duplicate_paths = self.config.pluginmanager._duplicatepaths - if path in duplicate_paths: - return () - else: - duplicate_paths.add(path) - - return ihook.pytest_collect_file(path=path, parent=self) - - def _recurse(self, dirpath): - if dirpath.basename == "__pycache__": - return False - ihook = self.gethookproxy(dirpath.dirpath()) - if ihook.pytest_ignore_collect(path=dirpath, config=self.config): - return False - for pat in self._norecursepatterns: - if dirpath.check(fnmatch=pat): - return False - ihook = self.gethookproxy(dirpath) - ihook.pytest_collect_directory(path=dirpath, parent=self) - return True - - if six.PY2: - - @staticmethod - def _visit_filter(f): - return f.check(file=1) and not f.strpath.endswith("*.pyc") - - else: - - @staticmethod - def _visit_filter(f): - return f.check(file=1) - - def _tryconvertpyarg(self, x): - """Convert a dotted module name to path.""" - try: - with _patched_find_module(): - loader = pkgutil.find_loader(x) - except ImportError: - return x - if loader is None: - return x - # This method is sometimes invoked when AssertionRewritingHook, which - # does not define a get_filename method, is already in place: - try: - with _patched_find_module(): - path = loader.get_filename(x) - except AttributeError: - # Retrieve path from AssertionRewritingHook: - path = loader.modules[x][0].co_filename - if loader.is_package(x): - path = os.path.dirname(path) - return path - - def _parsearg(self, arg): - """ return (fspath, names) tuple after checking the file exists. 
""" - parts = str(arg).split("::") - if self.config.option.pyargs: - parts[0] = self._tryconvertpyarg(parts[0]) - relpath = parts[0].replace("/", os.sep) - path = self.config.invocation_dir.join(relpath, abs=True) - if not path.check(): - if self.config.option.pyargs: - raise UsageError( - "file or package not found: " + arg + " (missing __init__.py?)" - ) - raise UsageError("file not found: " + arg) - parts[0] = path.realpath() - return parts - - def matchnodes(self, matching, names): - self.trace("matchnodes", matching, names) - self.trace.root.indent += 1 - nodes = self._matchnodes(matching, names) - num = len(nodes) - self.trace("matchnodes finished -> ", num, "nodes") - self.trace.root.indent -= 1 - if num == 0: - raise NoMatch(matching, names[:1]) - return nodes - - def _matchnodes(self, matching, names): - if not matching or not names: - return matching - name = names[0] - assert name - nextnames = names[1:] - resultnodes = [] - for node in matching: - if isinstance(node, nodes.Item): - if not names: - resultnodes.append(node) - continue - assert isinstance(node, nodes.Collector) - key = (type(node), node.nodeid) - if key in self._node_cache: - rep = self._node_cache[key] - else: - rep = collect_one_node(node) - self._node_cache[key] = rep - if rep.passed: - has_matched = False - for x in rep.result: - # TODO: remove parametrized workaround once collection structure contains parametrization - if x.name == name or x.name.split("[")[0] == name: - resultnodes.extend(self.matchnodes([x], nextnames)) - has_matched = True - # XXX accept IDs that don't have "()" for class instances - if not has_matched and len(rep.result) == 1 and x.name == "()": - nextnames.insert(0, name) - resultnodes.extend(self.matchnodes([x], nextnames)) - else: - # report collection failures here to avoid failing to run some test - # specified in the command line because the module could not be - # imported (#134) - node.ihook.pytest_collectreport(report=rep) - return resultnodes - - def genitems(self, node): - self.trace("genitems", node) - if isinstance(node, nodes.Item): - node.ihook.pytest_itemcollected(item=node) - yield node - else: - assert isinstance(node, nodes.Collector) - rep = collect_one_node(node) - if rep.passed: - for subnode in rep.result: - for x in self.genitems(subnode): - yield x - node.ihook.pytest_collectreport(report=rep) diff --git a/contrib/python/pytest/py2/_pytest/mark/__init__.py b/contrib/python/pytest/py2/_pytest/mark/__init__.py deleted file mode 100644 index 6bc22fe27d..0000000000 --- a/contrib/python/pytest/py2/_pytest/mark/__init__.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- coding: utf-8 -*- -""" generic mechanism for marking and selecting python functions. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from .legacy import matchkeyword -from .legacy import matchmark -from .structures import EMPTY_PARAMETERSET_OPTION -from .structures import get_empty_parameterset_mark -from .structures import Mark -from .structures import MARK_GEN -from .structures import MarkDecorator -from .structures import MarkGenerator -from .structures import ParameterSet -from _pytest.config import UsageError - -__all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"] - - -def param(*values, **kw): - """Specify a parameter in `pytest.mark.parametrize`_ calls or - :ref:`parametrized fixtures <fixture-parametrize-marks>`. - - .. 
code-block:: python - - @pytest.mark.parametrize("test_input,expected", [ - ("3+5", 8), - pytest.param("6*9", 42, marks=pytest.mark.xfail), - ]) - def test_eval(test_input, expected): - assert eval(test_input) == expected - - :param values: variable args of the values of the parameter set, in order. - :keyword marks: a single mark or a list of marks to be applied to this parameter set. - :keyword str id: the id to attribute to this parameter set. - """ - return ParameterSet.param(*values, **kw) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "-k", - action="store", - dest="keyword", - default="", - metavar="EXPRESSION", - help="only run tests which match the given substring expression. " - "An expression is a python evaluatable expression " - "where all names are substring-matched against test names " - "and their parent classes. Example: -k 'test_method or test_" - "other' matches all test functions and classes whose name " - "contains 'test_method' or 'test_other', while -k 'not test_method' " - "matches those that don't contain 'test_method' in their names. " - "-k 'not test_method and not test_other' will eliminate the matches. " - "Additionally keywords are matched to classes and functions " - "containing extra names in their 'extra_keyword_matches' set, " - "as well as functions which have names assigned directly to them.", - ) - - group._addoption( - "-m", - action="store", - dest="markexpr", - default="", - metavar="MARKEXPR", - help="only run tests matching given mark expression. " - "example: -m 'mark1 and not mark2'.", - ) - - group.addoption( - "--markers", - action="store_true", - help="show markers (builtin, plugin and per-project ones).", - ) - - parser.addini("markers", "markers for test functions", "linelist") - parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") - - -def pytest_cmdline_main(config): - import _pytest.config - - if config.option.markers: - config._do_configure() - tw = _pytest.config.create_terminal_writer(config) - for line in config.getini("markers"): - parts = line.split(":", 1) - name = parts[0] - rest = parts[1] if len(parts) == 2 else "" - tw.write("@pytest.mark.%s:" % name, bold=True) - tw.line(rest) - tw.line() - config._ensure_unconfigure() - return 0 - - -pytest_cmdline_main.tryfirst = True - - -def deselect_by_keyword(items, config): - keywordexpr = config.option.keyword.lstrip() - if not keywordexpr: - return - - if keywordexpr.startswith("-"): - keywordexpr = "not " + keywordexpr[1:] - selectuntil = False - if keywordexpr[-1:] == ":": - selectuntil = True - keywordexpr = keywordexpr[:-1] - - remaining = [] - deselected = [] - for colitem in items: - if keywordexpr and not matchkeyword(colitem, keywordexpr): - deselected.append(colitem) - else: - if selectuntil: - keywordexpr = None - remaining.append(colitem) - - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining - - -def deselect_by_mark(items, config): - matchexpr = config.option.markexpr - if not matchexpr: - return - - remaining = [] - deselected = [] - for item in items: - if matchmark(item, matchexpr): - remaining.append(item) - else: - deselected.append(item) - - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining - - -def pytest_collection_modifyitems(items, config): - deselect_by_keyword(items, config) - deselect_by_mark(items, config) - - -def pytest_configure(config): - config._old_mark_config = MARK_GEN._config - MARK_GEN._config = 
config - - empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) - - if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): - raise UsageError( - "{!s} must be one of skip, xfail or fail_at_collect" - " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) - ) - - -def pytest_unconfigure(config): - MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/contrib/python/pytest/py2/_pytest/mark/evaluate.py b/contrib/python/pytest/py2/_pytest/mark/evaluate.py deleted file mode 100644 index 506546e253..0000000000 --- a/contrib/python/pytest/py2/_pytest/mark/evaluate.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import platform -import sys -import traceback - -import six - -from ..outcomes import fail -from ..outcomes import TEST_OUTCOME - - -def cached_eval(config, expr, d): - if not hasattr(config, "_evalcache"): - config._evalcache = {} - try: - return config._evalcache[expr] - except KeyError: - import _pytest._code - - exprcode = _pytest._code.compile(expr, mode="eval") - config._evalcache[expr] = x = eval(exprcode, d) - return x - - -class MarkEvaluator(object): - def __init__(self, item, name): - self.item = item - self._marks = None - self._mark = None - self._mark_name = name - - def __bool__(self): - # dont cache here to prevent staleness - return bool(self._get_marks()) - - __nonzero__ = __bool__ - - def wasvalid(self): - return not hasattr(self, "exc") - - def _get_marks(self): - return list(self.item.iter_markers(name=self._mark_name)) - - def invalidraise(self, exc): - raises = self.get("raises") - if not raises: - return - return not isinstance(exc, raises) - - def istrue(self): - try: - return self._istrue() - except TEST_OUTCOME: - self.exc = sys.exc_info() - if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^"] - msg.append("SyntaxError: invalid syntax") - else: - msg = traceback.format_exception_only(*self.exc[:2]) - fail( - "Error evaluating %r expression\n" - " %s\n" - "%s" % (self._mark_name, self.expr, "\n".join(msg)), - pytrace=False, - ) - - def _getglobals(self): - d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} - if hasattr(self.item, "obj"): - d.update(self.item.obj.__globals__) - return d - - def _istrue(self): - if hasattr(self, "result"): - return self.result - self._marks = self._get_marks() - - if self._marks: - self.result = False - for mark in self._marks: - self._mark = mark - if "condition" in mark.kwargs: - args = (mark.kwargs["condition"],) - else: - args = mark.args - - for expr in args: - self.expr = expr - if isinstance(expr, six.string_types): - d = self._getglobals() - result = cached_eval(self.item.config, expr, d) - else: - if "reason" not in mark.kwargs: - # XXX better be checked at collection time - msg = ( - "you need to specify reason=STRING " - "when using booleans as conditions." 
- ) - fail(msg) - result = bool(expr) - if result: - self.result = True - self.reason = mark.kwargs.get("reason", None) - self.expr = expr - return self.result - - if not args: - self.result = True - self.reason = mark.kwargs.get("reason", None) - return self.result - return False - - def get(self, attr, default=None): - if self._mark is None: - return default - return self._mark.kwargs.get(attr, default) - - def getexplanation(self): - expl = getattr(self, "reason", None) or self.get("reason", None) - if not expl: - if not hasattr(self, "expr"): - return "" - else: - return "condition: " + str(self.expr) - return expl diff --git a/contrib/python/pytest/py2/_pytest/mark/legacy.py b/contrib/python/pytest/py2/_pytest/mark/legacy.py deleted file mode 100644 index c56482f14d..0000000000 --- a/contrib/python/pytest/py2/_pytest/mark/legacy.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -""" -this is a place where we put datastructures used by legacy apis -we hope ot remove -""" -import keyword - -import attr - -from _pytest.config import UsageError - - -@attr.s -class MarkMapping(object): - """Provides a local mapping for markers where item access - resolves to True if the marker is present. """ - - own_mark_names = attr.ib() - - @classmethod - def from_item(cls, item): - mark_names = {mark.name for mark in item.iter_markers()} - return cls(mark_names) - - def __getitem__(self, name): - return name in self.own_mark_names - - -class KeywordMapping(object): - """Provides a local mapping for keywords. - Given a list of names, map any substring of one of these names to True. - """ - - def __init__(self, names): - self._names = names - - @classmethod - def from_item(cls, item): - mapped_names = set() - - # Add the names of the current item and any parent items - import pytest - - for item in item.listchain(): - if not isinstance(item, pytest.Instance): - mapped_names.add(item.name) - - # Add the names added as extra keywords to current or parent items - mapped_names.update(item.listextrakeywords()) - - # Add the names attached to the current function through direct assignment - if hasattr(item, "function"): - mapped_names.update(item.function.__dict__) - - # add the markers to the keywords as we no longer handle them correctly - mapped_names.update(mark.name for mark in item.iter_markers()) - - return cls(mapped_names) - - def __getitem__(self, subname): - for name in self._names: - if subname in name: - return True - return False - - -python_keywords_allowed_list = ["or", "and", "not"] - - -def matchmark(colitem, markexpr): - """Tries to match on any marker names, attached to the given colitem.""" - try: - return eval(markexpr, {}, MarkMapping.from_item(colitem)) - except SyntaxError as e: - raise SyntaxError(str(e) + "\nMarker expression must be valid Python!") - - -def matchkeyword(colitem, keywordexpr): - """Tries to match given keyword expression to given collector item. - - Will match on the name of colitem, including the names of its parents. - Only matches names of items which are either a :class:`Class` or a - :class:`Function`. - Additionally, matches on names in the 'extra_keyword_matches' set of - any item, as well as names directly assigned to test functions. 
- """ - mapping = KeywordMapping.from_item(colitem) - if " " not in keywordexpr: - # special case to allow for simple "-k pass" and "-k 1.3" - return mapping[keywordexpr] - elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: - return not mapping[keywordexpr[4:]] - for kwd in keywordexpr.split(): - if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: - raise UsageError( - "Python keyword '{}' not accepted in expressions passed to '-k'".format( - kwd - ) - ) - try: - return eval(keywordexpr, {}, mapping) - except SyntaxError: - raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/contrib/python/pytest/py2/_pytest/mark/structures.py b/contrib/python/pytest/py2/_pytest/mark/structures.py deleted file mode 100644 index aaebe927be..0000000000 --- a/contrib/python/pytest/py2/_pytest/mark/structures.py +++ /dev/null @@ -1,413 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import warnings -from collections import namedtuple -from operator import attrgetter - -import attr -import six - -from ..compat import ascii_escaped -from ..compat import ATTRS_EQ_FIELD -from ..compat import getfslineno -from ..compat import MappingMixin -from ..compat import NOTSET -from _pytest.deprecated import PYTEST_PARAM_UNKNOWN_KWARGS -from _pytest.outcomes import fail -from _pytest.warning_types import PytestUnknownMarkWarning - -EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" - - -def alias(name, warning=None): - getter = attrgetter(name) - - def warned(self): - warnings.warn(warning, stacklevel=2) - return getter(self) - - return property(getter if warning is None else warned, doc="alias for " + name) - - -def istestfunc(func): - return ( - hasattr(func, "__call__") - and getattr(func, "__name__", "<lambda>") != "<lambda>" - ) - - -def get_empty_parameterset_mark(config, argnames, func): - from ..nodes import Collector - - requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) - if requested_mark in ("", None, "skip"): - mark = MARK_GEN.skip - elif requested_mark == "xfail": - mark = MARK_GEN.xfail(run=False) - elif requested_mark == "fail_at_collect": - f_name = func.__name__ - _, lineno = getfslineno(func) - raise Collector.CollectError( - "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) - ) - else: - raise LookupError(requested_mark) - fs, lineno = getfslineno(func) - reason = "got empty parameter set %r, function %s at %s:%d" % ( - argnames, - func.__name__, - fs, - lineno, - ) - return mark(reason=reason) - - -class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): - @classmethod - def param(cls, *values, **kwargs): - marks = kwargs.pop("marks", ()) - if isinstance(marks, MarkDecorator): - marks = (marks,) - else: - assert isinstance(marks, (tuple, list, set)) - - id_ = kwargs.pop("id", None) - if id_ is not None: - if not isinstance(id_, six.string_types): - raise TypeError( - "Expected id to be a string, got {}: {!r}".format(type(id_), id_) - ) - id_ = ascii_escaped(id_) - - if kwargs: - warnings.warn( - PYTEST_PARAM_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=3 - ) - return cls(values, marks, id_) - - @classmethod - def extract_from(cls, parameterset, force_tuple=False): - """ - :param parameterset: - a legacy style parameterset that may or may not be a tuple, - and may or may not be wrapped into a mess of mark objects - - :param force_tuple: - enforce tuple wrapping so single argument tuple values - don't get decomposed and break tests - """ - - if isinstance(parameterset, cls): - return 
parameterset - if force_tuple: - return cls.param(parameterset) - else: - return cls(parameterset, marks=[], id=None) - - @staticmethod - def _parse_parametrize_args(argnames, argvalues, *args, **kwargs): - if not isinstance(argnames, (tuple, list)): - argnames = [x.strip() for x in argnames.split(",") if x.strip()] - force_tuple = len(argnames) == 1 - else: - force_tuple = False - return argnames, force_tuple - - @staticmethod - def _parse_parametrize_parameters(argvalues, force_tuple): - return [ - ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues - ] - - @classmethod - def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): - argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) - parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) - del argvalues - - if parameters: - # check all parameter sets have the correct number of values - for param in parameters: - if len(param.values) != len(argnames): - msg = ( - '{nodeid}: in "parametrize" the number of names ({names_len}):\n' - " {names}\n" - "must be equal to the number of values ({values_len}):\n" - " {values}" - ) - fail( - msg.format( - nodeid=function_definition.nodeid, - values=param.values, - names=argnames, - names_len=len(argnames), - values_len=len(param.values), - ), - pytrace=False, - ) - else: - # empty parameter set (likely computed at runtime): create a single - # parameter set with NOTSET values, with the "empty parameter set" mark applied to it - mark = get_empty_parameterset_mark(config, argnames, func) - parameters.append( - ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) - ) - return argnames, parameters - - -@attr.s(frozen=True) -class Mark(object): - #: name of the mark - name = attr.ib(type=str) - #: positional arguments of the mark decorator - args = attr.ib() # List[object] - #: keyword arguments of the mark decorator - kwargs = attr.ib() # Dict[str, object] - - def combined_with(self, other): - """ - :param other: the mark to combine with - :type other: Mark - :rtype: Mark - - combines by appending args and merging the mappings - """ - assert self.name == other.name - return Mark( - self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) - ) - - -@attr.s -class MarkDecorator(object): - """ A decorator for test functions and test classes. When applied - it will create :class:`MarkInfo` objects which may be - :ref:`retrieved by hooks as item keywords <excontrolskip>`. - MarkDecorator instances are often created like this:: - - mark1 = pytest.mark.NAME # simple MarkDecorator - mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator - - and can then be applied as decorators to test functions:: - - @mark2 - def test_function(): - pass - - When a MarkDecorator instance is called it does the following: - 1. If called with a single class as its only positional argument and no - additional keyword arguments, it attaches itself to the class so it - gets applied automatically to all test cases found in that class. - 2. If called with a single function as its only positional argument and - no additional keyword arguments, it attaches a MarkInfo object to the - function, containing all the arguments already stored internally in - the MarkDecorator. - 3. When called in any other case, it performs a 'fake construction' call, - i.e. it returns a new MarkDecorator instance with the original - MarkDecorator's content updated with the arguments passed to this - call. 
- - Note: The rules above prevent MarkDecorator objects from storing only a - single function or class reference as their positional argument with no - additional keyword or positional arguments. - - """ - - mark = attr.ib(validator=attr.validators.instance_of(Mark)) - - name = alias("mark.name") - args = alias("mark.args") - kwargs = alias("mark.kwargs") - - @property - def markname(self): - return self.name # for backward-compat (2.4.1 had this attr) - - def __eq__(self, other): - return self.mark == other.mark if isinstance(other, MarkDecorator) else False - - def __repr__(self): - return "<MarkDecorator %r>" % (self.mark,) - - def with_args(self, *args, **kwargs): - """ return a MarkDecorator with extra arguments added - - unlike call this can be used even if the sole argument is a callable/class - - :return: MarkDecorator - """ - - mark = Mark(self.name, args, kwargs) - return self.__class__(self.mark.combined_with(mark)) - - def __call__(self, *args, **kwargs): - """ if passed a single callable argument: decorate it with mark info. - otherwise add *args/**kwargs in-place to mark information. """ - if args and not kwargs: - func = args[0] - is_class = inspect.isclass(func) - if len(args) == 1 and (istestfunc(func) or is_class): - store_mark(func, self.mark) - return func - return self.with_args(*args, **kwargs) - - -def get_unpacked_marks(obj): - """ - obtain the unpacked marks that are stored on an object - """ - mark_list = getattr(obj, "pytestmark", []) - if not isinstance(mark_list, list): - mark_list = [mark_list] - return normalize_mark_list(mark_list) - - -def normalize_mark_list(mark_list): - """ - normalizes marker decorating helpers to mark objects - - :type mark_list: List[Union[Mark, Markdecorator]] - :rtype: List[Mark] - """ - extracted = [ - getattr(mark, "mark", mark) for mark in mark_list - ] # unpack MarkDecorator - for mark in extracted: - if not isinstance(mark, Mark): - raise TypeError("got {!r} instead of Mark".format(mark)) - return [x for x in extracted if isinstance(x, Mark)] - - -def store_mark(obj, mark): - """store a Mark on an object - this is used to implement the Mark declarations/decorators correctly - """ - assert isinstance(mark, Mark), mark - # always reassign name to avoid updating pytestmark - # in a reference that was only borrowed - obj.pytestmark = get_unpacked_marks(obj) + [mark] - - -class MarkGenerator(object): - """ Factory for :class:`MarkDecorator` objects - exposed as - a ``pytest.mark`` singleton instance. Example:: - - import pytest - @pytest.mark.slowtest - def test_function(): - pass - - will set a 'slowtest' :class:`MarkInfo` object - on the ``test_function`` object. """ - - _config = None - _markers = set() - - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError("Marker name must NOT start with underscore") - - if self._config is not None: - # We store a set of markers as a performance optimisation - if a mark - # name is in the set we definitely know it, but a mark may be known and - # not in the set. We therefore start by updating the set! - if name not in self._markers: - for line in self._config.getini("markers"): - # example lines: "skipif(condition): skip the given test if..." - # or "hypothesis: tests which use Hypothesis", so to get the - # marker name we split on both `:` and `(`. 
- if line == "ya:external": - marker = line - else: - marker = line.split(":")[0].split("(")[0].strip() - self._markers.add(marker) - - # If the name is not in the set of known marks after updating, - # then it really is time to issue a warning or an error. - if name not in self._markers: - if self._config.option.strict_markers: - fail( - "{!r} not found in `markers` configuration option".format(name), - pytrace=False, - ) - else: - warnings.warn( - "Unknown pytest.mark.%s - is this a typo? You can register " - "custom marks to avoid this warning - for details, see " - "https://docs.pytest.org/en/latest/mark.html" % name, - PytestUnknownMarkWarning, - ) - - return MarkDecorator(Mark(name, (), {})) - - -MARK_GEN = MarkGenerator() - - -class NodeKeywords(MappingMixin): - def __init__(self, node): - self.node = node - self.parent = node.parent - self._markers = {node.name: True} - - def __getitem__(self, key): - try: - return self._markers[key] - except KeyError: - if self.parent is None: - raise - return self.parent.keywords[key] - - def __setitem__(self, key, value): - self._markers[key] = value - - def __delitem__(self, key): - raise ValueError("cannot delete key in keywords dict") - - def __iter__(self): - seen = self._seen() - return iter(seen) - - def _seen(self): - seen = set(self._markers) - if self.parent is not None: - seen.update(self.parent.keywords) - return seen - - def __len__(self): - return len(self._seen()) - - def __repr__(self): - return "<NodeKeywords for node %s>" % (self.node,) - - -# mypy cannot find this overload, remove when on attrs>=19.2 -@attr.s(hash=False, **{ATTRS_EQ_FIELD: False}) # type: ignore -class NodeMarkers(object): - """ - internal structure for storing marks belonging to a node - - ..warning:: - - unstable api - - """ - - own_markers = attr.ib(default=attr.Factory(list)) - - def update(self, add_markers): - """update the own markers - """ - self.own_markers.extend(add_markers) - - def find(self, name): - """ - find markers in own nodes or parent nodes - needs a better place - """ - for mark in self.own_markers: - if mark.name == name: - yield mark - - def __iter__(self): - return iter(self.own_markers) diff --git a/contrib/python/pytest/py2/_pytest/monkeypatch.py b/contrib/python/pytest/py2/_pytest/monkeypatch.py deleted file mode 100644 index e8671b0c70..0000000000 --- a/contrib/python/pytest/py2/_pytest/monkeypatch.py +++ /dev/null @@ -1,336 +0,0 @@ -# -*- coding: utf-8 -*- -""" monkeypatching and mocking functionality. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import re -import sys -import warnings -from contextlib import contextmanager - -import six - -import pytest -from _pytest.fixtures import fixture -from _pytest.pathlib import Path - -RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") - - -@fixture -def monkeypatch(): - """The returned ``monkeypatch`` fixture provides these - helper methods to modify objects, dictionaries or os.environ:: - - monkeypatch.setattr(obj, name, value, raising=True) - monkeypatch.delattr(obj, name, raising=True) - monkeypatch.setitem(mapping, name, value) - monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, raising=True) - monkeypatch.syspath_prepend(path) - monkeypatch.chdir(path) - - All modifications will be undone after the requesting - test function or fixture has finished. 
The ``raising`` - parameter determines if a KeyError or AttributeError - will be raised if the set/deletion operation has no target. - """ - mpatch = MonkeyPatch() - yield mpatch - mpatch.undo() - - -def resolve(name): - # simplified from zope.dottedname - parts = name.split(".") - - used = parts.pop(0) - found = __import__(used) - for part in parts: - used += "." + part - try: - found = getattr(found, part) - except AttributeError: - pass - else: - continue - # we use explicit un-nesting of the handling block in order - # to avoid nested exceptions on python 3 - try: - __import__(used) - except ImportError as ex: - # str is used for py2 vs py3 - expected = str(ex).split()[-1] - if expected == used: - raise - else: - raise ImportError("import error in %s: %s" % (used, ex)) - found = annotated_getattr(found, part, used) - return found - - -def annotated_getattr(obj, name, ann): - try: - obj = getattr(obj, name) - except AttributeError: - raise AttributeError( - "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name) - ) - return obj - - -def derive_importpath(import_path, raising): - if not isinstance(import_path, six.string_types) or "." not in import_path: - raise TypeError("must be absolute import path string, not %r" % (import_path,)) - module, attr = import_path.rsplit(".", 1) - target = resolve(module) - if raising: - annotated_getattr(target, attr, ann=module) - return attr, target - - -class Notset(object): - def __repr__(self): - return "<notset>" - - -notset = Notset() - - -class MonkeyPatch(object): - """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. - """ - - def __init__(self): - self._setattr = [] - self._setitem = [] - self._cwd = None - self._savesyspath = None - - @contextmanager - def context(self): - """ - Context manager that returns a new :class:`MonkeyPatch` object which - undoes any patching done inside the ``with`` block upon exit: - - .. code-block:: python - - import functools - def test_partial(monkeypatch): - with monkeypatch.context() as m: - m.setattr(functools, "partial", 3) - - Useful in situations where it is desired to undo some patches before the test ends, - such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples - of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_. - """ - m = MonkeyPatch() - try: - yield m - finally: - m.undo() - - def setattr(self, target, name, value=notset, raising=True): - """ Set attribute value on target, memorizing the old value. - By default raise AttributeError if the attribute did not exist. - - For convenience you can specify a string as ``target`` which - will be interpreted as a dotted import path, with the last part - being the attribute name. Example: - ``monkeypatch.setattr("os.getcwd", lambda: "/")`` - would set the ``getcwd`` function of the ``os`` module. - - The ``raising`` value determines if the setattr should fail - if the attribute is not already present (defaults to True - which means it will raise). 
- """ - __tracebackhide__ = True - import inspect - - if value is notset: - if not isinstance(target, six.string_types): - raise TypeError( - "use setattr(target, name, value) or " - "setattr(target, value) with target being a dotted " - "import string" - ) - value = name - name, target = derive_importpath(target, raising) - - oldval = getattr(target, name, notset) - if raising and oldval is notset: - raise AttributeError("%r has no attribute %r" % (target, name)) - - # avoid class descriptors like staticmethod/classmethod - if inspect.isclass(target): - oldval = target.__dict__.get(name, notset) - self._setattr.append((target, name, oldval)) - setattr(target, name, value) - - def delattr(self, target, name=notset, raising=True): - """ Delete attribute ``name`` from ``target``, by default raise - AttributeError it the attribute did not previously exist. - - If no ``name`` is specified and ``target`` is a string - it will be interpreted as a dotted import path with the - last part being the attribute name. - - If ``raising`` is set to False, no exception will be raised if the - attribute is missing. - """ - __tracebackhide__ = True - import inspect - - if name is notset: - if not isinstance(target, six.string_types): - raise TypeError( - "use delattr(target, name) or " - "delattr(target) with target being a dotted " - "import string" - ) - name, target = derive_importpath(target, raising) - - if not hasattr(target, name): - if raising: - raise AttributeError(name) - else: - oldval = getattr(target, name, notset) - # Avoid class descriptors like staticmethod/classmethod. - if inspect.isclass(target): - oldval = target.__dict__.get(name, notset) - self._setattr.append((target, name, oldval)) - delattr(target, name) - - def setitem(self, dic, name, value): - """ Set dictionary entry ``name`` to value. """ - self._setitem.append((dic, name, dic.get(name, notset))) - dic[name] = value - - def delitem(self, dic, name, raising=True): - """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. - - If ``raising`` is set to False, no exception will be raised if the - key is missing. - """ - if name not in dic: - if raising: - raise KeyError(name) - else: - self._setitem.append((dic, name, dic.get(name, notset))) - del dic[name] - - def _warn_if_env_name_is_not_str(self, name): - """On Python 2, warn if the given environment variable name is not a native str (#4056)""" - if six.PY2 and not isinstance(name, str): - warnings.warn( - pytest.PytestWarning( - "Environment variable name {!r} should be str".format(name) - ) - ) - - def setenv(self, name, value, prepend=None): - """ Set environment variable ``name`` to ``value``. If ``prepend`` - is a character, read the current environment variable value - and prepend the ``value`` adjoined with the ``prepend`` character.""" - if not isinstance(value, str): - warnings.warn( - pytest.PytestWarning( - "Value of environment variable {name} type should be str, but got " - "{value!r} (type: {type}); converted to str implicitly".format( - name=name, value=value, type=type(value).__name__ - ) - ), - stacklevel=2, - ) - value = str(value) - if prepend and name in os.environ: - value = value + prepend + os.environ[name] - self._warn_if_env_name_is_not_str(name) - self.setitem(os.environ, name, value) - - def delenv(self, name, raising=True): - """ Delete ``name`` from the environment. Raise KeyError if it does - not exist. - - If ``raising`` is set to False, no exception will be raised if the - environment variable is missing. 
- """ - self._warn_if_env_name_is_not_str(name) - self.delitem(os.environ, name, raising=raising) - - def syspath_prepend(self, path): - """ Prepend ``path`` to ``sys.path`` list of import locations. """ - from pkg_resources import fixup_namespace_packages - - if self._savesyspath is None: - self._savesyspath = sys.path[:] - sys.path.insert(0, str(path)) - - # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 - fixup_namespace_packages(str(path)) - - # A call to syspathinsert() usually means that the caller wants to - # import some dynamically created files, thus with python3 we - # invalidate its import caches. - # This is especially important when any namespace package is in used, - # since then the mtime based FileFinder cache (that gets created in - # this case already) gets not invalidated when writing the new files - # quickly afterwards. - if sys.version_info >= (3, 3): - from importlib import invalidate_caches - - invalidate_caches() - - def chdir(self, path): - """ Change the current working directory to the specified path. - Path can be a string or a py.path.local object. - """ - if self._cwd is None: - self._cwd = os.getcwd() - if hasattr(path, "chdir"): - path.chdir() - elif isinstance(path, Path): - # modern python uses the fspath protocol here LEGACY - os.chdir(str(path)) - else: - os.chdir(path) - - def undo(self): - """ Undo previous changes. This call consumes the - undo stack. Calling it a second time has no effect unless - you do more monkeypatching after the undo call. - - There is generally no need to call `undo()`, since it is - called automatically during tear-down. - - Note that the same `monkeypatch` fixture is used across a - single test function invocation. If `monkeypatch` is used both by - the test function itself and one of the test fixtures, - calling `undo()` will undo all of the changes made in - both functions. - """ - for obj, name, value in reversed(self._setattr): - if value is not notset: - setattr(obj, name, value) - else: - delattr(obj, name) - self._setattr[:] = [] - for dictionary, name, value in reversed(self._setitem): - if value is notset: - try: - del dictionary[name] - except KeyError: - pass # was already deleted, so we have the desired state - else: - dictionary[name] = value - self._setitem[:] = [] - if self._savesyspath is not None: - sys.path[:] = self._savesyspath - self._savesyspath = None - - if self._cwd is not None: - os.chdir(self._cwd) - self._cwd = None diff --git a/contrib/python/pytest/py2/_pytest/nodes.py b/contrib/python/pytest/py2/_pytest/nodes.py deleted file mode 100644 index 206e9ae163..0000000000 --- a/contrib/python/pytest/py2/_pytest/nodes.py +++ /dev/null @@ -1,429 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import warnings - -import py -import six - -import _pytest._code -from _pytest.compat import getfslineno -from _pytest.mark.structures import NodeKeywords -from _pytest.outcomes import fail - -SEP = "/" - -tracebackcutdir = py.path.local(_pytest.__file__).dirpath() - - -def _splitnode(nodeid): - """Split a nodeid into constituent 'parts'. - - Node IDs are strings, and can be things like: - '' - 'testing/code' - 'testing/code/test_excinfo.py' - 'testing/code/test_excinfo.py::TestFormattedExcinfo' - - Return values are lists e.g. 
- [] - ['testing', 'code'] - ['testing', 'code', 'test_excinfo.py'] - ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] - """ - if nodeid == "": - # If there is no root node at all, return an empty list so the caller's logic can remain sane - return [] - parts = nodeid.split(SEP) - # Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar' - parts[-1:] = parts[-1].split("::") - return parts - - -def ischildnode(baseid, nodeid): - """Return True if the nodeid is a child node of the baseid. - - E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' - """ - base_parts = _splitnode(baseid) - node_parts = _splitnode(nodeid) - if len(node_parts) < len(base_parts): - return False - return node_parts[: len(base_parts)] == base_parts - - -class Node(object): - """ base class for Collector and Item the test collection tree. - Collector subclasses have children, Items are terminal nodes.""" - - def __init__( - self, name, parent=None, config=None, session=None, fspath=None, nodeid=None - ): - #: a unique name within the scope of the parent node - self.name = name - - #: the parent collector node. - self.parent = parent - - #: the pytest config object - self.config = config or parent.config - - #: the session this node is part of - self.session = session or parent.session - - #: filesystem path where this node was collected from (can be None) - self.fspath = fspath or getattr(parent, "fspath", None) - - #: keywords/markers collected from all scopes - self.keywords = NodeKeywords(self) - - #: the marker objects belonging to this node - self.own_markers = [] - - #: allow adding of extra keywords to use for matching - self.extra_keyword_matches = set() - - # used for storing artificial fixturedefs for direct parametrization - self._name2pseudofixturedef = {} - - if nodeid is not None: - assert "::()" not in nodeid - self._nodeid = nodeid - else: - self._nodeid = self.parent.nodeid - if self.name != "()": - self._nodeid += "::" + self.name - - @property - def ihook(self): - """ fspath sensitive hook proxy used to call pytest hooks""" - return self.session.gethookproxy(self.fspath) - - def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None)) - - def warn(self, warning): - """Issue a warning for this item. - - Warnings will be displayed after the test session, unless explicitly suppressed - - :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. - - :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. - - Example usage: - - .. code-block:: python - - node.warn(PytestWarning("some message")) - - """ - from _pytest.warning_types import PytestWarning - - if not isinstance(warning, PytestWarning): - raise ValueError( - "warning must be an instance of PytestWarning or subclass, got {!r}".format( - warning - ) - ) - path, lineno = get_fslocation_from_item(self) - warnings.warn_explicit( - warning, - category=None, - filename=str(path), - lineno=lineno + 1 if lineno is not None else None, - ) - - # methods for ordering nodes - @property - def nodeid(self): - """ a ::-separated string denoting its collection tree address. """ - return self._nodeid - - def __hash__(self): - return hash(self.nodeid) - - def setup(self): - pass - - def teardown(self): - pass - - def listchain(self): - """ return list of all parent collectors up to self, - starting from root of collection tree. 
""" - chain = [] - item = self - while item is not None: - chain.append(item) - item = item.parent - chain.reverse() - return chain - - def add_marker(self, marker, append=True): - """dynamically add a marker object to the node. - - :type marker: ``str`` or ``pytest.mark.*`` object - :param marker: - ``append=True`` whether to append the marker, - if ``False`` insert at position ``0``. - """ - from _pytest.mark import MarkDecorator, MARK_GEN - - if isinstance(marker, six.string_types): - marker = getattr(MARK_GEN, marker) - elif not isinstance(marker, MarkDecorator): - raise ValueError("is not a string or pytest.mark.* Marker") - self.keywords[marker.name] = marker - if append: - self.own_markers.append(marker.mark) - else: - self.own_markers.insert(0, marker.mark) - - def iter_markers(self, name=None): - """ - :param name: if given, filter the results by the name attribute - - iterate over all markers of the node - """ - return (x[1] for x in self.iter_markers_with_node(name=name)) - - def iter_markers_with_node(self, name=None): - """ - :param name: if given, filter the results by the name attribute - - iterate over all markers of the node - returns sequence of tuples (node, mark) - """ - for node in reversed(self.listchain()): - for mark in node.own_markers: - if name is None or getattr(mark, "name", None) == name: - yield node, mark - - def get_closest_marker(self, name, default=None): - """return the first marker matching the name, from closest (for example function) to farther level (for example - module level). - - :param default: fallback return value of no marker was found - :param name: name to filter by - """ - return next(self.iter_markers(name=name), default) - - def listextrakeywords(self): - """ Return a set of all extra keywords in self and any parents.""" - extra_keywords = set() - for item in self.listchain(): - extra_keywords.update(item.extra_keyword_matches) - return extra_keywords - - def listnames(self): - return [x.name for x in self.listchain()] - - def addfinalizer(self, fin): - """ register a function to be called when this node is finalized. - - This method can only be called when this node is active - in a setup chain, for example during self.setup(). - """ - self.session._setupstate.addfinalizer(fin, self) - - def getparent(self, cls): - """ get the next parent node (including ourself) - which is an instance of the given class""" - current = self - while current and not isinstance(current, cls): - current = current.parent - return current - - def _prunetraceback(self, excinfo): - pass - - def _repr_failure_py(self, excinfo, style=None): - if excinfo.errisinstance(fail.Exception): - if not excinfo.value.pytrace: - return six.text_type(excinfo.value) - fm = self.session._fixturemanager - if excinfo.errisinstance(fm.FixtureLookupError): - return excinfo.value.formatrepr() - tbfilter = True - if self.config.getoption("fulltrace", False): - style = "long" - else: - tb = _pytest._code.Traceback([excinfo.traceback[-1]]) - self._prunetraceback(excinfo) - if len(excinfo.traceback) == 0: - excinfo.traceback = tb - tbfilter = False # prunetraceback already does it - if style == "auto": - style = "long" - # XXX should excinfo.getrepr record all data and toterminal() process it? 
- if style is None: - if self.config.getoption("tbstyle", "auto") == "short": - style = "short" - else: - style = "long" - - if self.config.getoption("verbose", 0) > 1: - truncate_locals = False - else: - truncate_locals = True - - try: - os.getcwd() - abspath = False - except OSError: - abspath = True - - return excinfo.getrepr( - funcargs=True, - abspath=abspath, - showlocals=self.config.getoption("showlocals", False), - style=style, - tbfilter=tbfilter, - truncate_locals=truncate_locals, - ) - - repr_failure = _repr_failure_py - - -def get_fslocation_from_item(item): - """Tries to extract the actual location from an item, depending on available attributes: - - * "fslocation": a pair (path, lineno) - * "obj": a Python object that the item wraps. - * "fspath": just a path - - :rtype: a tuple of (str|LocalPath, int) with filename and line number. - """ - result = getattr(item, "location", None) - if result is not None: - return result[:2] - obj = getattr(item, "obj", None) - if obj is not None: - return getfslineno(obj) - return getattr(item, "fspath", "unknown location"), -1 - - -class Collector(Node): - """ Collector instances create children through collect() - and thus iteratively build a tree. - """ - - class CollectError(Exception): - """ an error during collection, contains a custom message. """ - - def collect(self): - """ returns a list of children (items and collectors) - for this collection node. - """ - raise NotImplementedError("abstract") - - def repr_failure(self, excinfo): - """ represent a collection failure. """ - if excinfo.errisinstance(self.CollectError): - exc = excinfo.value - return str(exc.args[0]) - - # Respect explicit tbstyle option, but default to "short" - # (None._repr_failure_py defaults to "long" without "fulltrace" option). - tbstyle = self.config.getoption("tbstyle", "auto") - if tbstyle == "auto": - tbstyle = "short" - - return self._repr_failure_py(excinfo, style=tbstyle) - - def _prunetraceback(self, excinfo): - if hasattr(self, "fspath"): - traceback = excinfo.traceback - ntraceback = traceback.cut(path=self.fspath) - if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=tracebackcutdir) - excinfo.traceback = ntraceback.filter() - - -def _check_initialpaths_for_relpath(session, fspath): - for initial_path in session._initialpaths: - if fspath.common(initial_path) == initial_path: - return fspath.relto(initial_path) - - -class FSCollector(Collector): - def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): - fspath = py.path.local(fspath) # xxx only for test_resultlog.py? - name = fspath.basename - if parent is not None: - rel = fspath.relto(parent.fspath) - if rel: - name = rel - name = name.replace(os.sep, SEP) - self.fspath = fspath - - session = session or parent.session - - if nodeid is None: - nodeid = self.fspath.relto(session.config.rootdir) - - if not nodeid: - nodeid = _check_initialpaths_for_relpath(session, fspath) - if nodeid and os.sep != SEP: - nodeid = nodeid.replace(os.sep, SEP) - - super(FSCollector, self).__init__( - name, parent, config, session, nodeid=nodeid, fspath=fspath - ) - - -class File(FSCollector): - """ base class for collecting tests from a file. """ - - -class Item(Node): - """ a basic test invocation item. Note that for a single function - there might be multiple test invocation items. 
- """ - - nextitem = None - - def __init__(self, name, parent=None, config=None, session=None, nodeid=None): - super(Item, self).__init__(name, parent, config, session, nodeid=nodeid) - self._report_sections = [] - - #: user properties is a list of tuples (name, value) that holds user - #: defined properties for this test. - self.user_properties = [] - - def add_report_section(self, when, key, content): - """ - Adds a new report section, similar to what's done internally to add stdout and - stderr captured output:: - - item.add_report_section("call", "stdout", "report section contents") - - :param str when: - One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. - :param str key: - Name of the section, can be customized at will. Pytest uses ``"stdout"`` and - ``"stderr"`` internally. - - :param str content: - The full contents as a string. - """ - if content: - self._report_sections.append((when, key, content)) - - def reportinfo(self): - return self.fspath, None, "" - - @property - def location(self): - try: - return self._location - except AttributeError: - location = self.reportinfo() - fspath = self.session._node_location_to_relpath(location[0]) - location = (fspath, location[1], str(location[2])) - self._location = location - return location diff --git a/contrib/python/pytest/py2/_pytest/nose.py b/contrib/python/pytest/py2/_pytest/nose.py deleted file mode 100644 index fbab91da24..0000000000 --- a/contrib/python/pytest/py2/_pytest/nose.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -""" run test suites written for nose. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -import six - -import pytest -from _pytest import python -from _pytest import runner -from _pytest import unittest -from _pytest.config import hookimpl - - -def get_skip_exceptions(): - skip_classes = set() - for module_name in ("unittest", "unittest2", "nose"): - mod = sys.modules.get(module_name) - if hasattr(mod, "SkipTest"): - skip_classes.add(mod.SkipTest) - return tuple(skip_classes) - - -def pytest_runtest_makereport(item, call): - if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): - # let's substitute the excinfo with a pytest.skip one - call2 = runner.CallInfo.from_call( - lambda: pytest.skip(six.text_type(call.excinfo.value)), call.when - ) - call.excinfo = call2.excinfo - - -@hookimpl(trylast=True) -def pytest_runtest_setup(item): - if is_potential_nosetest(item): - if not call_optional(item.obj, "setup"): - # call module level setup if there is no object level one - call_optional(item.parent.obj, "setup") - # XXX this implies we only call teardown when setup worked - item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) - - -def teardown_nose(item): - if is_potential_nosetest(item): - if not call_optional(item.obj, "teardown"): - call_optional(item.parent.obj, "teardown") - # if hasattr(item.parent, '_nosegensetup'): - # #call_optional(item._nosegensetup, 'teardown') - # del item.parent._nosegensetup - - -def is_potential_nosetest(item): - # extra check needed since we do not do nose style setup/teardown - # on direct unittest style classes - return isinstance(item, python.Function) and not isinstance( - item, unittest.TestCaseFunction - ) - - -def call_optional(obj, name): - method = getattr(obj, name, None) - isfixture = hasattr(method, "_pytestfixturefunction") - if method is not None and not isfixture and callable(method): - # If there's any 
problems allow the exception to raise rather than - # silently ignoring them - method() - return True diff --git a/contrib/python/pytest/py2/_pytest/outcomes.py b/contrib/python/pytest/py2/_pytest/outcomes.py deleted file mode 100644 index 4620f957c7..0000000000 --- a/contrib/python/pytest/py2/_pytest/outcomes.py +++ /dev/null @@ -1,187 +0,0 @@ -# -*- coding: utf-8 -*- -""" -exception classes and constants handling test outcomes -as well as functions creating them -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -from packaging.version import Version - - -class OutcomeException(BaseException): - """ OutcomeException and its subclass instances indicate and - contain info about test and collection outcomes. - """ - - def __init__(self, msg=None, pytrace=True): - BaseException.__init__(self, msg) - self.msg = msg - self.pytrace = pytrace - - def __repr__(self): - if self.msg: - val = self.msg - if isinstance(val, bytes): - val = val.decode("UTF-8", errors="replace") - return val - return "<%s instance>" % (self.__class__.__name__,) - - __str__ = __repr__ - - -TEST_OUTCOME = (OutcomeException, Exception) - - -class Skipped(OutcomeException): - # XXX hackish: on 3k we fake to live in the builtins - # in order to have Skipped exception printing shorter/nicer - __module__ = "builtins" - - def __init__(self, msg=None, pytrace=True, allow_module_level=False): - OutcomeException.__init__(self, msg=msg, pytrace=pytrace) - self.allow_module_level = allow_module_level - - -class Failed(OutcomeException): - """ raised from an explicit call to pytest.fail() """ - - __module__ = "builtins" - - -class Exit(Exception): - """ raised for immediate program exits (no tracebacks/summaries)""" - - def __init__(self, msg="unknown reason", returncode=None): - self.msg = msg - self.returncode = returncode - super(Exit, self).__init__(msg) - - -# exposed helper methods - - -def exit(msg, returncode=None): - """ - Exit testing process. - - :param str msg: message to display upon exit. - :param int returncode: return code to be used when exiting pytest. - """ - __tracebackhide__ = True - raise Exit(msg, returncode) - - -exit.Exception = Exit - - -def skip(msg="", **kwargs): - """ - Skip an executing test with the given message. - - This function should be called only during testing (setup, call or teardown) or - during collection by using the ``allow_module_level`` flag. This function can - be called in doctests as well. - - :kwarg bool allow_module_level: allows this function to be called at - module level, skipping the rest of the module. Default to False. - - .. note:: - It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be - skipped under certain conditions like mismatching platforms or - dependencies. - Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP - <https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_) - to skip a doctest statically. - """ - __tracebackhide__ = True - allow_module_level = kwargs.pop("allow_module_level", False) - if kwargs: - raise TypeError("unexpected keyword arguments: {}".format(sorted(kwargs))) - raise Skipped(msg=msg, allow_module_level=allow_module_level) - - -skip.Exception = Skipped - - -def fail(msg="", pytrace=True): - """ - Explicitly fail an executing test with the given message. - - :param str msg: the message to show the user as reason for the failure. 
- :param bool pytrace: if false the msg represents the full failure information and no - python traceback will be reported. - """ - __tracebackhide__ = True - raise Failed(msg=msg, pytrace=pytrace) - - -fail.Exception = Failed - - -class XFailed(fail.Exception): - """ raised from an explicit call to pytest.xfail() """ - - -def xfail(reason=""): - """ - Imperatively xfail an executing test or setup functions with the given reason. - - This function should be called only during testing (setup, call or teardown). - - .. note:: - It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be - xfailed under certain conditions like known bugs or missing features. - """ - __tracebackhide__ = True - raise XFailed(reason) - - -xfail.Exception = XFailed - - -def importorskip(modname, minversion=None, reason=None): - """Imports and returns the requested module ``modname``, or skip the current test - if the module cannot be imported. - - :param str modname: the name of the module to import - :param str minversion: if given, the imported module ``__version__`` attribute must be - at least this minimal version, otherwise the test is still skipped. - :param str reason: if given, this reason is shown as the message when the module - cannot be imported. - """ - import warnings - - __tracebackhide__ = True - compile(modname, "", "eval") # to catch syntaxerrors - import_exc = None - - with warnings.catch_warnings(): - # make sure to ignore ImportWarnings that might happen because - # of existing directories with the same name we're trying to - # import but without a __init__.py file - warnings.simplefilter("ignore") - try: - __import__(modname) - except ImportError as exc: - # Do not raise chained exception here(#1485) - import_exc = exc - if import_exc: - if reason is None: - reason = "could not import %r: %s" % (modname, import_exc) - raise Skipped(reason, allow_module_level=True) - mod = sys.modules[modname] - if minversion is None: - return mod - verattr = getattr(mod, "__version__", None) - if minversion is not None: - if verattr is None or Version(verattr) < Version(minversion): - raise Skipped( - "module %r has __version__ %r, required is: %r" - % (modname, verattr, minversion), - allow_module_level=True, - ) - return mod diff --git a/contrib/python/pytest/py2/_pytest/pastebin.py b/contrib/python/pytest/py2/_pytest/pastebin.py deleted file mode 100644 index 7a3e80231c..0000000000 --- a/contrib/python/pytest/py2/_pytest/pastebin.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -""" submit failure or test session information to a pastebin service. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import tempfile - -import six - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group._addoption( - "--pastebin", - metavar="mode", - action="store", - dest="pastebin", - default=None, - choices=["failed", "all"], - help="send failed|all info to bpaste.net pastebin service.", - ) - - -@pytest.hookimpl(trylast=True) -def pytest_configure(config): - if config.option.pastebin == "all": - tr = config.pluginmanager.getplugin("terminalreporter") - # if no terminal reporter plugin is present, nothing we can do here; - # this can happen when this function executes in a slave node - # when using pytest-xdist, for example - if tr is not None: - # pastebin file will be utf-8 encoded binary file - config._pastebinfile = tempfile.TemporaryFile("w+b") - oldwrite = tr._tw.write - - def tee_write(s, **kwargs): - oldwrite(s, **kwargs) - if isinstance(s, six.text_type): - s = s.encode("utf-8") - config._pastebinfile.write(s) - - tr._tw.write = tee_write - - -def pytest_unconfigure(config): - if hasattr(config, "_pastebinfile"): - # get terminal contents and delete file - config._pastebinfile.seek(0) - sessionlog = config._pastebinfile.read() - config._pastebinfile.close() - del config._pastebinfile - # undo our patching in the terminal reporter - tr = config.pluginmanager.getplugin("terminalreporter") - del tr._tw.__dict__["write"] - # write summary - tr.write_sep("=", "Sending information to Paste Service") - pastebinurl = create_new_paste(sessionlog) - tr.write_line("pastebin session-log: %s\n" % pastebinurl) - - -def create_new_paste(contents): - """ - Creates a new paste using bpaste.net service. 
- - :contents: paste contents as utf-8 encoded bytes - :returns: url to the pasted contents - """ - import re - - if sys.version_info < (3, 0): - from urllib import urlopen, urlencode - else: - from urllib.request import urlopen - from urllib.parse import urlencode - - params = {"code": contents, "lexer": "text", "expiry": "1week"} - url = "https://bpaste.net" - response = urlopen(url, data=urlencode(params).encode("ascii")).read() - m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) - if m: - return "%s/show/%s" % (url, m.group(1)) - else: - return "bad response: " + response - - -def pytest_terminal_summary(terminalreporter): - import _pytest.config - - if terminalreporter.config.option.pastebin != "failed": - return - tr = terminalreporter - if "failed" in tr.stats: - terminalreporter.write_sep("=", "Sending information to Paste Service") - for rep in terminalreporter.stats.get("failed"): - try: - msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc - except AttributeError: - msg = tr._getfailureheadline(rep) - tw = _pytest.config.create_terminal_writer( - terminalreporter.config, stringio=True - ) - rep.toterminal(tw) - s = tw.stringio.getvalue() - assert len(s) - pastebinurl = create_new_paste(s) - tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/contrib/python/pytest/py2/_pytest/pathlib.py b/contrib/python/pytest/py2/_pytest/pathlib.py deleted file mode 100644 index 42071f4310..0000000000 --- a/contrib/python/pytest/py2/_pytest/pathlib.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import atexit -import errno -import fnmatch -import itertools -import operator -import os -import shutil -import sys -import uuid -import warnings -from functools import partial -from functools import reduce -from os.path import expanduser -from os.path import expandvars -from os.path import isabs -from os.path import sep -from posixpath import sep as posix_sep - -import six -from six.moves import map - -from .compat import PY36 -from _pytest.warning_types import PytestWarning - -if PY36: - from pathlib import Path, PurePath -else: - from pathlib2 import Path, PurePath - -__all__ = ["Path", "PurePath"] - - -LOCK_TIMEOUT = 60 * 60 * 3 - -get_lock_path = operator.methodcaller("joinpath", ".lock") - - -def ensure_reset_dir(path): - """ - ensures the given path is an empty directory - """ - if path.exists(): - rm_rf(path) - path.mkdir() - - -def on_rm_rf_error(func, path, exc, **kwargs): - """Handles known read-only errors during rmtree. - - The returned value is used only by our own tests. - """ - start_path = kwargs["start_path"] - exctype, excvalue = exc[:2] - - # another process removed the file in the middle of the "rm_rf" (xdist for example) - # more context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 - if isinstance(excvalue, OSError) and excvalue.errno == errno.ENOENT: - return False - - if not isinstance(excvalue, OSError) or excvalue.errno not in ( - errno.EACCES, - errno.EPERM, - ): - warnings.warn( - PytestWarning( - "(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue) - ) - ) - return False - - if func not in (os.rmdir, os.remove, os.unlink): - warnings.warn( - PytestWarning( - "(rm_rf) unknown function {} when removing {}:\n{}: {}".format( - path, func, exctype, excvalue - ) - ) - ) - return False - - # Chmod + retry. 
- import stat - - def chmod_rw(p): - mode = os.stat(p).st_mode - os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) - - # For files, we need to recursively go upwards in the directories to - # ensure they all are also writable. - p = Path(path) - if p.is_file(): - for parent in p.parents: - chmod_rw(str(parent)) - # stop when we reach the original path passed to rm_rf - if parent == start_path: - break - chmod_rw(str(path)) - - func(path) - return True - - -def rm_rf(path): - """Remove the path contents recursively, even if some elements - are read-only. - """ - onerror = partial(on_rm_rf_error, start_path=path) - shutil.rmtree(str(path), onerror=onerror) - - -def find_prefixed(root, prefix): - """finds all elements in root that begin with the prefix, case insensitive""" - l_prefix = prefix.lower() - for x in root.iterdir(): - if x.name.lower().startswith(l_prefix): - yield x - - -def extract_suffixes(iter, prefix): - """ - :param iter: iterator over path names - :param prefix: expected prefix of the path names - :returns: the parts of the paths following the prefix - """ - p_len = len(prefix) - for p in iter: - yield p.name[p_len:] - - -def find_suffixes(root, prefix): - """combines find_prefixes and extract_suffixes - """ - return extract_suffixes(find_prefixed(root, prefix), prefix) - - -def parse_num(maybe_num): - """parses number path suffixes, returns -1 on error""" - try: - return int(maybe_num) - except ValueError: - return -1 - - -if six.PY2: - - def _max(iterable, default): - """needed due to python2.7 lacking the default argument for max""" - return reduce(max, iterable, default) - - -else: - _max = max - - -def _force_symlink(root, target, link_to): - """helper to create the current symlink - - it's full of race conditions that are reasonably ok to ignore - for the context of best effort linking to the latest testrun - - the presumption being thatin case of much parallelism - the inaccuracy is going to be acceptable - """ - current_symlink = root.joinpath(target) - try: - current_symlink.unlink() - except OSError: - pass - try: - current_symlink.symlink_to(link_to) - except Exception: - pass - - -def make_numbered_dir(root, prefix): - """create a directory with an increased number as suffix for the given prefix""" - for i in range(10): - # try up to 10 times to create the folder - max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) - new_number = max_existing + 1 - new_path = root.joinpath("{}{}".format(prefix, new_number)) - try: - new_path.mkdir() - except Exception: - pass - else: - _force_symlink(root, prefix + "current", new_path) - return new_path - else: - raise EnvironmentError( - "could not create numbered dir with prefix " - "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) - ) - - -def create_cleanup_lock(p): - """crates a lock to prevent premature folder cleanup""" - lock_path = get_lock_path(p) - try: - fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) - except OSError as e: - if e.errno == errno.EEXIST: - six.raise_from( - EnvironmentError("cannot create lockfile in {path}".format(path=p)), e - ) - else: - raise - else: - pid = os.getpid() - spid = str(pid) - if not isinstance(spid, bytes): - spid = spid.encode("ascii") - os.write(fd, spid) - os.close(fd) - if not lock_path.is_file(): - raise EnvironmentError("lock path got renamed after successful creation") - return lock_path - - -def register_cleanup_lock_removal(lock_path, register=atexit.register): - """registers a cleanup function for removing 
a lock, by default on atexit""" - pid = os.getpid() - - def cleanup_on_exit(lock_path=lock_path, original_pid=pid): - current_pid = os.getpid() - if current_pid != original_pid: - # fork - return - try: - lock_path.unlink() - except (OSError, IOError): - pass - - return register(cleanup_on_exit) - - -def maybe_delete_a_numbered_dir(path): - """removes a numbered directory if its lock can be obtained and it does not seem to be in use""" - lock_path = None - try: - lock_path = create_cleanup_lock(path) - parent = path.parent - - garbage = parent.joinpath("garbage-{}".format(uuid.uuid4())) - path.rename(garbage) - rm_rf(garbage) - except (OSError, EnvironmentError): - # known races: - # * other process did a cleanup at the same time - # * deletable folder was found - # * process cwd (Windows) - return - finally: - # if we created the lock, ensure we remove it even if we failed - # to properly remove the numbered dir - if lock_path is not None: - try: - lock_path.unlink() - except (OSError, IOError): - pass - - -def ensure_deletable(path, consider_lock_dead_if_created_before): - """checks if a lock exists and breaks it if its considered dead""" - if path.is_symlink(): - return False - lock = get_lock_path(path) - if not lock.exists(): - return True - try: - lock_time = lock.stat().st_mtime - except Exception: - return False - else: - if lock_time < consider_lock_dead_if_created_before: - lock.unlink() - return True - else: - return False - - -def try_cleanup(path, consider_lock_dead_if_created_before): - """tries to cleanup a folder if we can ensure it's deletable""" - if ensure_deletable(path, consider_lock_dead_if_created_before): - maybe_delete_a_numbered_dir(path) - - -def cleanup_candidates(root, prefix, keep): - """lists candidates for numbered directories to be removed - follows py.path""" - max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) - max_delete = max_existing - keep - paths = find_prefixed(root, prefix) - paths, paths2 = itertools.tee(paths) - numbers = map(parse_num, extract_suffixes(paths2, prefix)) - for path, number in zip(paths, numbers): - if number <= max_delete: - yield path - - -def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before): - """cleanup for lock driven numbered directories""" - for path in cleanup_candidates(root, prefix, keep): - try_cleanup(path, consider_lock_dead_if_created_before) - for path in root.glob("garbage-*"): - try_cleanup(path, consider_lock_dead_if_created_before) - - -def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout): - """creates a numbered dir with a cleanup lock and removes old ones""" - e = None - for i in range(10): - try: - p = make_numbered_dir(root, prefix) - lock_path = create_cleanup_lock(p) - register_cleanup_lock_removal(lock_path) - except Exception as exc: - e = exc - else: - consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout - cleanup_numbered_dir( - root=root, - prefix=prefix, - keep=keep, - consider_lock_dead_if_created_before=consider_lock_dead_if_created_before, - ) - return p - assert e is not None - raise e - - -def resolve_from_str(input, root): - assert not isinstance(input, Path), "would break on py2" - root = Path(root) - input = expanduser(input) - input = expandvars(input) - if isabs(input): - return Path(input) - else: - return root.joinpath(input) - - -def fnmatch_ex(pattern, path): - """FNMatcher port from py.path.common which works with PurePath() instances. 
- - The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions - for each part of the path, while this algorithm uses the whole path instead. - - For example: - "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with - PurePath.match(). - - This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according - this logic. - - References: - * https://bugs.python.org/issue29249 - * https://bugs.python.org/issue34731 - """ - path = PurePath(path) - iswin32 = sys.platform.startswith("win") - - if iswin32 and sep not in pattern and posix_sep in pattern: - # Running on Windows, the pattern has no Windows path separators, - # and the pattern has one or more Posix path separators. Replace - # the Posix path separators with the Windows path separator. - pattern = pattern.replace(posix_sep, sep) - - if sep not in pattern: - name = path.name - else: - name = six.text_type(path) - return fnmatch.fnmatch(name, pattern) - - -def parts(s): - parts = s.split(sep) - return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} diff --git a/contrib/python/pytest/py2/_pytest/pytester.py b/contrib/python/pytest/py2/_pytest/pytester.py deleted file mode 100644 index f1d739c991..0000000000 --- a/contrib/python/pytest/py2/_pytest/pytester.py +++ /dev/null @@ -1,1413 +0,0 @@ -# -*- coding: utf-8 -*- -"""(disabled by default) support for testing pytest and pytest plugins.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import codecs -import gc -import os -import platform -import re -import subprocess -import sys -import time -import traceback -from fnmatch import fnmatch -from weakref import WeakKeyDictionary - -import py -import six - -import pytest -from _pytest._code import Source -from _pytest._io.saferepr import saferepr -from _pytest.assertion.rewrite import AssertionRewritingHook -from _pytest.capture import MultiCapture -from _pytest.capture import SysCapture -from _pytest.compat import safe_str -from _pytest.compat import Sequence -from _pytest.main import EXIT_INTERRUPTED -from _pytest.main import EXIT_OK -from _pytest.main import Session -from _pytest.monkeypatch import MonkeyPatch -from _pytest.pathlib import Path - -IGNORE_PAM = [ # filenames added when obtaining details about the current user - u"/var/lib/sss/mc/passwd" -] - - -def pytest_addoption(parser): - parser.addoption( - "--lsof", - action="store_true", - dest="lsof", - default=False, - help="run FD checks if lsof is available", - ) - - parser.addoption( - "--runpytest", - default="inprocess", - dest="runpytest", - choices=("inprocess", "subprocess"), - help=( - "run pytest sub runs in tests using an 'inprocess' " - "or 'subprocess' (python -m main) method" - ), - ) - - parser.addini( - "pytester_example_dir", help="directory to take the pytester example files from" - ) - - -def pytest_configure(config): - if config.getvalue("lsof"): - checker = LsofFdLeakChecker() - if checker.matching_platform(): - config.pluginmanager.register(checker) - - config.addinivalue_line( - "markers", - "pytester_example_path(*path_segments): join the given path " - "segments to `pytester_example_dir` for this test.", - ) - - -def raise_on_kwargs(kwargs): - __tracebackhide__ = True - if kwargs: # pragma: no branch - raise TypeError( - "Unexpected keyword arguments: {}".format(", ".join(sorted(kwargs))) - ) - - -class LsofFdLeakChecker(object): - 
def get_open_files(self): - out = self._exec_lsof() - open_files = self._parse_lsof_output(out) - return open_files - - def _exec_lsof(self): - pid = os.getpid() - # py3: use subprocess.DEVNULL directly. - with open(os.devnull, "wb") as devnull: - return subprocess.check_output( - ("lsof", "-Ffn0", "-p", str(pid)), stderr=devnull - ).decode() - - def _parse_lsof_output(self, out): - def isopen(line): - return line.startswith("f") and ( - "deleted" not in line - and "mem" not in line - and "txt" not in line - and "cwd" not in line - ) - - open_files = [] - - for line in out.split("\n"): - if isopen(line): - fields = line.split("\0") - fd = fields[0][1:] - filename = fields[1][1:] - if filename in IGNORE_PAM: - continue - if filename.startswith("/"): - open_files.append((fd, filename)) - - return open_files - - def matching_platform(self): - try: - subprocess.check_output(("lsof", "-v")) - except (OSError, subprocess.CalledProcessError): - return False - else: - return True - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_runtest_protocol(self, item): - lines1 = self.get_open_files() - yield - if hasattr(sys, "pypy_version_info"): - gc.collect() - lines2 = self.get_open_files() - - new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} - leaked_files = [t for t in lines2 if t[0] in new_fds] - if leaked_files: - error = [] - error.append("***** %s FD leakage detected" % len(leaked_files)) - error.extend([str(f) for f in leaked_files]) - error.append("*** Before:") - error.extend([str(f) for f in lines1]) - error.append("*** After:") - error.extend([str(f) for f in lines2]) - error.append(error[0]) - error.append("*** function %s:%s: %s " % item.location) - error.append("See issue #2366") - item.warn(pytest.PytestWarning("\n".join(error))) - - -# used at least by pytest-xdist plugin - - -@pytest.fixture -def _pytest(request): - """Return a helper which offers a gethookrecorder(hook) method which - returns a HookRecorder instance which helps to make assertions about called - hooks. - - """ - return PytestArg(request) - - -class PytestArg(object): - def __init__(self, request): - self.request = request - - def gethookrecorder(self, hook): - hookrecorder = HookRecorder(hook._pm) - self.request.addfinalizer(hookrecorder.finish_recording) - return hookrecorder - - -def get_public_names(values): - """Only return names from iterator values without a leading underscore.""" - return [x for x in values if x[0] != "_"] - - -class ParsedCall(object): - def __init__(self, name, kwargs): - self.__dict__.update(kwargs) - self._name = name - - def __repr__(self): - d = self.__dict__.copy() - del d["_name"] - return "<ParsedCall %r(**%r)>" % (self._name, d) - - -class HookRecorder(object): - """Record all hooks called in a plugin manager. - - This wraps all the hook calls in the plugin manager, recording each call - before propagating the normal calls. 
- - """ - - def __init__(self, pluginmanager): - self._pluginmanager = pluginmanager - self.calls = [] - - def before(hook_name, hook_impls, kwargs): - self.calls.append(ParsedCall(hook_name, kwargs)) - - def after(outcome, hook_name, hook_impls, kwargs): - pass - - self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) - - def finish_recording(self): - self._undo_wrapping() - - def getcalls(self, names): - if isinstance(names, str): - names = names.split() - return [call for call in self.calls if call._name in names] - - def assert_contains(self, entries): - __tracebackhide__ = True - i = 0 - entries = list(entries) - backlocals = sys._getframe(1).f_locals - while entries: - name, check = entries.pop(0) - for ind, call in enumerate(self.calls[i:]): - if call._name == name: - print("NAMEMATCH", name, call) - if eval(check, backlocals, call.__dict__): - print("CHECKERMATCH", repr(check), "->", call) - else: - print("NOCHECKERMATCH", repr(check), "-", call) - continue - i += ind + 1 - break - print("NONAMEMATCH", name, "with", call) - else: - pytest.fail("could not find %r check %r" % (name, check)) - - def popcall(self, name): - __tracebackhide__ = True - for i, call in enumerate(self.calls): - if call._name == name: - del self.calls[i] - return call - lines = ["could not find call %r, in:" % (name,)] - lines.extend([" %s" % x for x in self.calls]) - pytest.fail("\n".join(lines)) - - def getcall(self, name): - values = self.getcalls(name) - assert len(values) == 1, (name, values) - return values[0] - - # functionality for test reports - - def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): - return [x.report for x in self.getcalls(names)] - - def matchreport( - self, - inamepart="", - names="pytest_runtest_logreport pytest_collectreport", - when=None, - ): - """return a testreport whose dotted import path matches""" - values = [] - for rep in self.getreports(names=names): - if not when and rep.when != "call" and rep.passed: - # setup/teardown passing reports - let's ignore those - continue - if when and rep.when != when: - continue - if not inamepart or inamepart in rep.nodeid.split("::"): - values.append(rep) - if not values: - raise ValueError( - "could not find test report matching %r: " - "no test reports at all!" 
% (inamepart,) - ) - if len(values) > 1: - raise ValueError( - "found 2 or more testreports matching %r: %s" % (inamepart, values) - ) - return values[0] - - def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"): - return [rep for rep in self.getreports(names) if rep.failed] - - def getfailedcollections(self): - return self.getfailures("pytest_collectreport") - - def listoutcomes(self): - passed = [] - skipped = [] - failed = [] - for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"): - if rep.passed: - if rep.when == "call": - passed.append(rep) - elif rep.skipped: - skipped.append(rep) - else: - assert rep.failed, "Unexpected outcome: {!r}".format(rep) - failed.append(rep) - return passed, skipped, failed - - def countoutcomes(self): - return [len(x) for x in self.listoutcomes()] - - def assertoutcome(self, passed=0, skipped=0, failed=0): - realpassed, realskipped, realfailed = self.listoutcomes() - assert passed == len(realpassed) - assert skipped == len(realskipped) - assert failed == len(realfailed) - - def clear(self): - self.calls[:] = [] - - -@pytest.fixture -def linecomp(request): - return LineComp() - - -@pytest.fixture(name="LineMatcher") -def LineMatcher_fixture(request): - return LineMatcher - - -@pytest.fixture -def testdir(request, tmpdir_factory): - return Testdir(request, tmpdir_factory) - - -@pytest.fixture -def _sys_snapshot(): - snappaths = SysPathsSnapshot() - snapmods = SysModulesSnapshot() - yield - snapmods.restore() - snappaths.restore() - - -@pytest.fixture -def _config_for_test(): - from _pytest.config import get_config - - config = get_config() - yield config - config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. - - -rex_outcome = re.compile(r"(\d+) ([\w-]+)") - - -class RunResult(object): - """The result of running a command. - - Attributes: - - :ret: the return value - :outlines: list of lines captured from stdout - :errlines: list of lines captures from stderr - :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to - reconstruct stdout or the commonly used ``stdout.fnmatch_lines()`` - method - :stderr: :py:class:`LineMatcher` of stderr - :duration: duration in seconds - - """ - - def __init__(self, ret, outlines, errlines, duration): - self.ret = ret - self.outlines = outlines - self.errlines = errlines - self.stdout = LineMatcher(outlines) - self.stderr = LineMatcher(errlines) - self.duration = duration - - def __repr__(self): - return ( - "<RunResult ret=%r len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>" - % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration) - ) - - def parseoutcomes(self): - """Return a dictionary of outcomestring->num from parsing the terminal - output that the test process produced. - - """ - for line in reversed(self.outlines): - if "seconds" in line: - outcomes = rex_outcome.findall(line) - if outcomes: - d = {} - for num, cat in outcomes: - d[cat] = int(num) - return d - raise ValueError("Pytest terminal report not found") - - def assert_outcomes( - self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0 - ): - """Assert that the specified outcomes appear with the respective - numbers (0 means it didn't occur) in the text output from a test run. 
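An illustrative use of the result object described here, assuming the ``testdir`` fixture defined later in this file (the pytester plugin is disabled by default and must be enabled, e.g. via ``pytest_plugins = "pytester"``); the inner test contents are made up:

    def test_outcome_counts(testdir):
        testdir.makepyfile(
            """
            import pytest

            def test_ok():
                assert True

            @pytest.mark.skip(reason="demo")
            def test_skipped():
                pass
            """
        )
        result = testdir.runpytest()
        result.assert_outcomes(passed=1, skipped=1)
        assert result.ret == 0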
- - """ - d = self.parseoutcomes() - obtained = { - "passed": d.get("passed", 0), - "skipped": d.get("skipped", 0), - "failed": d.get("failed", 0), - "error": d.get("error", 0), - "xpassed": d.get("xpassed", 0), - "xfailed": d.get("xfailed", 0), - } - expected = { - "passed": passed, - "skipped": skipped, - "failed": failed, - "error": error, - "xpassed": xpassed, - "xfailed": xfailed, - } - assert obtained == expected - - -class CwdSnapshot(object): - def __init__(self): - self.__saved = os.getcwd() - - def restore(self): - os.chdir(self.__saved) - - -class SysModulesSnapshot(object): - def __init__(self, preserve=None): - self.__preserve = preserve - self.__saved = dict(sys.modules) - - def restore(self): - if self.__preserve: - self.__saved.update( - (k, m) for k, m in sys.modules.items() if self.__preserve(k) - ) - sys.modules.clear() - sys.modules.update(self.__saved) - - -class SysPathsSnapshot(object): - def __init__(self): - self.__saved = list(sys.path), list(sys.meta_path) - - def restore(self): - sys.path[:], sys.meta_path[:] = self.__saved - - -class Testdir(object): - """Temporary test directory with tools to test/run pytest itself. - - This is based on the ``tmpdir`` fixture but provides a number of methods - which aid with testing pytest itself. Unless :py:meth:`chdir` is used all - methods will use :py:attr:`tmpdir` as their current working directory. - - Attributes: - - :tmpdir: The :py:class:`py.path.local` instance of the temporary directory. - - :plugins: A list of plugins to use with :py:meth:`parseconfig` and - :py:meth:`runpytest`. Initially this is an empty list but plugins can - be added to the list. The type of items to add to the list depends on - the method using them so refer to them for details. - - """ - - CLOSE_STDIN = object - - class TimeoutExpired(Exception): - pass - - def __init__(self, request, tmpdir_factory): - self.request = request - self._mod_collections = WeakKeyDictionary() - name = request.function.__name__ - self.tmpdir = tmpdir_factory.mktemp(name, numbered=True) - self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True) - self.plugins = [] - self._cwd_snapshot = CwdSnapshot() - self._sys_path_snapshot = SysPathsSnapshot() - self._sys_modules_snapshot = self.__take_sys_modules_snapshot() - self.chdir() - self.request.addfinalizer(self.finalize) - method = self.request.config.getoption("--runpytest") - if method == "inprocess": - self._runpytest_method = self.runpytest_inprocess - elif method == "subprocess": - self._runpytest_method = self.runpytest_subprocess - - mp = self.monkeypatch = MonkeyPatch() - mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self.test_tmproot)) - # Ensure no unexpected caching via tox. - mp.delenv("TOX_ENV_DIR", raising=False) - # Discard outer pytest options. - mp.delenv("PYTEST_ADDOPTS", raising=False) - - # Environment (updates) for inner runs. - tmphome = str(self.tmpdir) - self._env_run_update = {"HOME": tmphome, "USERPROFILE": tmphome} - - def __repr__(self): - return "<Testdir %r>" % (self.tmpdir,) - - def __str__(self): - return str(self.tmpdir) - - def finalize(self): - """Clean up global state artifacts. - - Some methods modify the global interpreter state and this tries to - clean this up. It does not remove the temporary directory however so - it can be looked at after the test run has finished. 
- - """ - self._sys_modules_snapshot.restore() - self._sys_path_snapshot.restore() - self._cwd_snapshot.restore() - self.monkeypatch.undo() - - def __take_sys_modules_snapshot(self): - # some zope modules used by twisted-related tests keep internal state - # and can't be deleted; we had some trouble in the past with - # `zope.interface` for example - def preserve_module(name): - return name.startswith("zope") - - return SysModulesSnapshot(preserve=preserve_module) - - def make_hook_recorder(self, pluginmanager): - """Create a new :py:class:`HookRecorder` for a PluginManager.""" - pluginmanager.reprec = reprec = HookRecorder(pluginmanager) - self.request.addfinalizer(reprec.finish_recording) - return reprec - - def chdir(self): - """Cd into the temporary directory. - - This is done automatically upon instantiation. - - """ - self.tmpdir.chdir() - - def _makefile(self, ext, args, kwargs, encoding="utf-8"): - items = list(kwargs.items()) - - def to_text(s): - return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s) - - if args: - source = u"\n".join(to_text(x) for x in args) - basename = self.request.function.__name__ - items.insert(0, (basename, source)) - - ret = None - for basename, value in items: - p = self.tmpdir.join(basename).new(ext=ext) - p.dirpath().ensure_dir() - source = Source(value) - source = u"\n".join(to_text(line) for line in source.lines) - p.write(source.strip().encode(encoding), "wb") - if ret is None: - ret = p - return ret - - def makefile(self, ext, *args, **kwargs): - r"""Create new file(s) in the testdir. - - :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`. - :param list[str] args: All args will be treated as strings and joined using newlines. - The result will be written as contents to the file. The name of the - file will be based on the test function requesting this fixture. - :param kwargs: Each keyword is the name of a file, while the value of it will - be written as contents of the file. - - Examples: - - .. code-block:: python - - testdir.makefile(".txt", "line1", "line2") - - testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") - - """ - return self._makefile(ext, args, kwargs) - - def makeconftest(self, source): - """Write a contest.py file with 'source' as contents.""" - return self.makepyfile(conftest=source) - - def makeini(self, source): - """Write a tox.ini file with 'source' as contents.""" - return self.makefile(".ini", tox=source) - - def getinicfg(self, source): - """Return the pytest section from the tox.ini config file.""" - p = self.makeini(source) - return py.iniconfig.IniConfig(p)["pytest"] - - def makepyfile(self, *args, **kwargs): - """Shortcut for .makefile() with a .py extension.""" - return self._makefile(".py", args, kwargs) - - def maketxtfile(self, *args, **kwargs): - """Shortcut for .makefile() with a .txt extension.""" - return self._makefile(".txt", args, kwargs) - - def syspathinsert(self, path=None): - """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. - - This is undone automatically when this object dies at the end of each - test. - """ - if path is None: - path = self.tmpdir - - self.monkeypatch.syspath_prepend(str(path)) - - def mkdir(self, name): - """Create a new (sub)directory.""" - return self.tmpdir.mkdir(name) - - def mkpydir(self, name): - """Create a new python package. - - This creates a (sub)directory with an empty ``__init__.py`` file so it - gets recognised as a python package. 
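A short sketch combining two of the helpers defined above (file contents and package name are illustrative):

    def test_ini_and_package_helpers(testdir):
        # getinicfg() writes a tox.ini and returns its [pytest] section.
        cfg = testdir.getinicfg("[pytest]\naddopts = -rs\n")
        assert cfg["addopts"] == "-rs"
        # mkpydir() creates a directory containing an empty __init__.py.
        pkg = testdir.mkpydir("mypkg")
        assert pkg.join("__init__.py").check()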
- - """ - p = self.mkdir(name) - p.ensure("__init__.py") - return p - - def copy_example(self, name=None): - import warnings - from _pytest.warning_types import PYTESTER_COPY_EXAMPLE - - warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2) - example_dir = self.request.config.getini("pytester_example_dir") - if example_dir is None: - raise ValueError("pytester_example_dir is unset, can't copy examples") - example_dir = self.request.config.rootdir.join(example_dir) - - for extra_element in self.request.node.iter_markers("pytester_example_path"): - assert extra_element.args - example_dir = example_dir.join(*extra_element.args) - - if name is None: - func_name = self.request.function.__name__ - maybe_dir = example_dir / func_name - maybe_file = example_dir / (func_name + ".py") - - if maybe_dir.isdir(): - example_path = maybe_dir - elif maybe_file.isfile(): - example_path = maybe_file - else: - raise LookupError( - "{} cant be found as module or package in {}".format( - func_name, example_dir.bestrelpath(self.request.config.rootdir) - ) - ) - else: - example_path = example_dir.join(name) - - if example_path.isdir() and not example_path.join("__init__.py").isfile(): - example_path.copy(self.tmpdir) - return self.tmpdir - elif example_path.isfile(): - result = self.tmpdir.join(example_path.basename) - example_path.copy(result) - return result - else: - raise LookupError( - 'example "{}" is not found as a file or directory'.format(example_path) - ) - - Session = Session - - def getnode(self, config, arg): - """Return the collection node of a file. - - :param config: :py:class:`_pytest.config.Config` instance, see - :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the - configuration - - :param arg: a :py:class:`py.path.local` instance of the file - - """ - session = Session(config) - assert "::" not in str(arg) - p = py.path.local(arg) - config.hook.pytest_sessionstart(session=session) - res = session.perform_collect([str(p)], genitems=False)[0] - config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) - return res - - def getpathnode(self, path): - """Return the collection node of a file. - - This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to - create the (configured) pytest Config instance. - - :param path: a :py:class:`py.path.local` instance of the file - - """ - config = self.parseconfigure(path) - session = Session(config) - x = session.fspath.bestrelpath(path) - config.hook.pytest_sessionstart(session=session) - res = session.perform_collect([x], genitems=False)[0] - config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) - return res - - def genitems(self, colitems): - """Generate all test items from a collection node. - - This recurses into the collection node and returns a list of all the - test items contained within. - - """ - session = colitems[0].session - result = [] - for colitem in colitems: - result.extend(session.genitems(colitem)) - return result - - def runitem(self, source): - """Run the "test_func" Item. - - The calling test instance (class containing the test method) must - provide a ``.getrunner()`` method which should return a runner which - can run the test protocol for a single item, e.g. - :py:func:`_pytest.runner.runtestprotocol`. 
- - """ - # used from runner functional tests - item = self.getitem(source) - # the test class where we are called from wants to provide the runner - testclassinstance = self.request.instance - runner = testclassinstance.getrunner() - return runner(item) - - def inline_runsource(self, source, *cmdlineargs): - """Run a test module in process using ``pytest.main()``. - - This run writes "source" into a temporary file and runs - ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance - for the result. - - :param source: the source code of the test module - - :param cmdlineargs: any extra command line arguments to use - - :return: :py:class:`HookRecorder` instance of the result - - """ - p = self.makepyfile(source) - values = list(cmdlineargs) + [p] - return self.inline_run(*values) - - def inline_genitems(self, *args): - """Run ``pytest.main(['--collectonly'])`` in-process. - - Runs the :py:func:`pytest.main` function to run all of pytest inside - the test process itself like :py:meth:`inline_run`, but returns a - tuple of the collected items and a :py:class:`HookRecorder` instance. - - """ - rec = self.inline_run("--collect-only", *args) - items = [x.item for x in rec.getcalls("pytest_itemcollected")] - return items, rec - - def inline_run(self, *args, **kwargs): - """Run ``pytest.main()`` in-process, returning a HookRecorder. - - Runs the :py:func:`pytest.main` function to run all of pytest inside - the test process itself. This means it can return a - :py:class:`HookRecorder` instance which gives more detailed results - from that run than can be done by matching stdout/stderr from - :py:meth:`runpytest`. - - :param args: command line arguments to pass to :py:func:`pytest.main` - - :param plugins: (keyword-only) extra plugin instances the - ``pytest.main()`` instance should use - - :return: a :py:class:`HookRecorder` instance - """ - plugins = kwargs.pop("plugins", []) - no_reraise_ctrlc = kwargs.pop("no_reraise_ctrlc", None) - raise_on_kwargs(kwargs) - - finalizers = [] - try: - # Do not load user config (during runs only). - mp_run = MonkeyPatch() - for k, v in self._env_run_update.items(): - mp_run.setenv(k, v) - finalizers.append(mp_run.undo) - - # When running pytest inline any plugins active in the main test - # process are already imported. So this disables the warning which - # will trigger to say they can no longer be rewritten, which is - # fine as they have already been rewritten. - orig_warn = AssertionRewritingHook._warn_already_imported - - def revert_warn_already_imported(): - AssertionRewritingHook._warn_already_imported = orig_warn - - finalizers.append(revert_warn_already_imported) - AssertionRewritingHook._warn_already_imported = lambda *a: None - - # Any sys.module or sys.path changes done while running pytest - # inline should be reverted after the test run completes to avoid - # clashing with later inline tests run within the same pytest test, - # e.g. just because they use matching test module names. 
- finalizers.append(self.__take_sys_modules_snapshot().restore) - finalizers.append(SysPathsSnapshot().restore) - - # Important note: - # - our tests should not leave any other references/registrations - # laying around other than possibly loaded test modules - # referenced from sys.modules, as nothing will clean those up - # automatically - - rec = [] - - class Collect(object): - def pytest_configure(x, config): - rec.append(self.make_hook_recorder(config.pluginmanager)) - - plugins.append(Collect()) - ret = pytest.main(list(args), plugins=plugins) - if len(rec) == 1: - reprec = rec.pop() - else: - - class reprec(object): - pass - - reprec.ret = ret - - # typically we reraise keyboard interrupts from the child run - # because it's our user requesting interruption of the testing - if ret == EXIT_INTERRUPTED and not no_reraise_ctrlc: - calls = reprec.getcalls("pytest_keyboard_interrupt") - if calls and calls[-1].excinfo.type == KeyboardInterrupt: - raise KeyboardInterrupt() - return reprec - finally: - for finalizer in finalizers: - finalizer() - - def runpytest_inprocess(self, *args, **kwargs): - """Return result of running pytest in-process, providing a similar - interface to what self.runpytest() provides. - """ - syspathinsert = kwargs.pop("syspathinsert", False) - - if syspathinsert: - self.syspathinsert() - now = time.time() - capture = MultiCapture(Capture=SysCapture) - capture.start_capturing() - try: - try: - reprec = self.inline_run(*args, **kwargs) - except SystemExit as e: - - class reprec(object): - ret = e.args[0] - - except Exception: - traceback.print_exc() - - class reprec(object): - ret = 3 - - finally: - out, err = capture.readouterr() - capture.stop_capturing() - sys.stdout.write(out) - sys.stderr.write(err) - - res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) - res.reprec = reprec - return res - - def runpytest(self, *args, **kwargs): - """Run pytest inline or in a subprocess, depending on the command line - option "--runpytest" and return a :py:class:`RunResult`. - - """ - args = self._ensure_basetemp(args) - return self._runpytest_method(*args, **kwargs) - - def _ensure_basetemp(self, args): - args = list(args) - for x in args: - if safe_str(x).startswith("--basetemp"): - break - else: - args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) - return args - - def parseconfig(self, *args): - """Return a new pytest Config instance from given commandline args. - - This invokes the pytest bootstrapping code in _pytest.config to create - a new :py:class:`_pytest.core.PluginManager` and call the - pytest_cmdline_parse hook to create a new - :py:class:`_pytest.config.Config` instance. - - If :py:attr:`plugins` has been populated they should be plugin modules - to be registered with the PluginManager. - - """ - args = self._ensure_basetemp(args) - - import _pytest.config - - config = _pytest.config._prepareconfig(args, self.plugins) - # we don't know what the test will do with this half-setup config - # object and thus we make sure it gets unconfigured properly in any - # case (otherwise capturing could still be active, for example) - self.request.addfinalizer(config._ensure_unconfigure) - return config - - def parseconfigure(self, *args): - """Return a new pytest configured Config instance. - - This returns a new :py:class:`_pytest.config.Config` instance like - :py:meth:`parseconfig`, but also calls the pytest_configure hook. 
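# Illustrative sketch, not part of the deleted file: parseconfigure() as
# documented above, used to check how pytest would read an ini file written
# with makeini(). Assumes the `testdir` fixture from the "pytester" plugin.
def test_addopts_is_read_from_ini(testdir):
    testdir.makeini(
        """
        [pytest]
        addopts = -q -x
        """
    )
    config = testdir.parseconfigure()
    assert config.getini("addopts") == ["-q", "-x"]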
- - """ - config = self.parseconfig(*args) - config._do_configure() - self.request.addfinalizer(config._ensure_unconfigure) - return config - - def getitem(self, source, funcname="test_func"): - """Return the test item for a test function. - - This writes the source to a python file and runs pytest's collection on - the resulting module, returning the test item for the requested - function name. - - :param source: the module source - - :param funcname: the name of the test function for which to return a - test item - - """ - items = self.getitems(source) - for item in items: - if item.name == funcname: - return item - assert 0, "%r item not found in module:\n%s\nitems: %s" % ( - funcname, - source, - items, - ) - - def getitems(self, source): - """Return all test items collected from the module. - - This writes the source to a python file and runs pytest's collection on - the resulting module, returning all test items contained within. - - """ - modcol = self.getmodulecol(source) - return self.genitems([modcol]) - - def getmodulecol(self, source, configargs=(), withinit=False): - """Return the module collection node for ``source``. - - This writes ``source`` to a file using :py:meth:`makepyfile` and then - runs the pytest collection on it, returning the collection node for the - test module. - - :param source: the source code of the module to collect - - :param configargs: any extra arguments to pass to - :py:meth:`parseconfigure` - - :param withinit: whether to also write an ``__init__.py`` file to the - same directory to ensure it is a package - - """ - if isinstance(source, Path): - path = self.tmpdir.join(str(source)) - assert not withinit, "not supported for paths" - else: - kw = {self.request.function.__name__: Source(source).strip()} - path = self.makepyfile(**kw) - if withinit: - self.makepyfile(__init__="#") - self.config = config = self.parseconfigure(path, *configargs) - return self.getnode(config, path) - - def collect_by_name(self, modcol, name): - """Return the collection node for name from the module collection. - - This will search a module collection node for a collection node - matching the given name. - - :param modcol: a module collection node; see :py:meth:`getmodulecol` - - :param name: the name of the node to return - - """ - if modcol not in self._mod_collections: - self._mod_collections[modcol] = list(modcol.collect()) - for colitem in self._mod_collections[modcol]: - if colitem.name == name: - return colitem - - def popen( - self, - cmdargs, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=CLOSE_STDIN, - **kw - ): - """Invoke subprocess.Popen. - - This calls subprocess.Popen making sure the current working directory - is in the PYTHONPATH. - - You probably want to use :py:meth:`run` instead. - - """ - env = os.environ.copy() - env["PYTHONPATH"] = os.pathsep.join( - filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) - ) - env.update(self._env_run_update) - kw["env"] = env - - if stdin is Testdir.CLOSE_STDIN: - kw["stdin"] = subprocess.PIPE - elif isinstance(stdin, bytes): - kw["stdin"] = subprocess.PIPE - else: - kw["stdin"] = stdin - - popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) - if stdin is Testdir.CLOSE_STDIN: - popen.stdin.close() - elif isinstance(stdin, bytes): - popen.stdin.write(stdin) - - return popen - - def run(self, *cmdargs, **kwargs): - """Run a command with arguments. - - Run a process using subprocess.Popen saving the stdout and stderr. 
- - :param args: the sequence of arguments to pass to `subprocess.Popen()` - :param timeout: the period in seconds after which to timeout and raise - :py:class:`Testdir.TimeoutExpired` - :param stdin: optional standard input. Bytes are being send, closing - the pipe, otherwise it is passed through to ``popen``. - Defaults to ``CLOSE_STDIN``, which translates to using a pipe - (``subprocess.PIPE``) that gets closed. - - Returns a :py:class:`RunResult`. - - """ - __tracebackhide__ = True - - timeout = kwargs.pop("timeout", None) - stdin = kwargs.pop("stdin", Testdir.CLOSE_STDIN) - raise_on_kwargs(kwargs) - - cmdargs = [ - str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs - ] - p1 = self.tmpdir.join("stdout") - p2 = self.tmpdir.join("stderr") - print("running:", *cmdargs) - print(" in:", py.path.local()) - f1 = codecs.open(str(p1), "w", encoding="utf8") - f2 = codecs.open(str(p2), "w", encoding="utf8") - try: - now = time.time() - popen = self.popen( - cmdargs, - stdin=stdin, - stdout=f1, - stderr=f2, - close_fds=(sys.platform != "win32"), - ) - if isinstance(stdin, bytes): - popen.stdin.close() - - def handle_timeout(): - __tracebackhide__ = True - - timeout_message = ( - "{seconds} second timeout expired running:" - " {command}".format(seconds=timeout, command=cmdargs) - ) - - popen.kill() - popen.wait() - raise self.TimeoutExpired(timeout_message) - - if timeout is None: - ret = popen.wait() - elif not six.PY2: - try: - ret = popen.wait(timeout) - except subprocess.TimeoutExpired: - handle_timeout() - else: - end = time.time() + timeout - - resolution = min(0.1, timeout / 10) - - while True: - ret = popen.poll() - if ret is not None: - break - - if time.time() > end: - handle_timeout() - - time.sleep(resolution) - finally: - f1.close() - f2.close() - f1 = codecs.open(str(p1), "r", encoding="utf8") - f2 = codecs.open(str(p2), "r", encoding="utf8") - try: - out = f1.read().splitlines() - err = f2.read().splitlines() - finally: - f1.close() - f2.close() - self._dump_lines(out, sys.stdout) - self._dump_lines(err, sys.stderr) - return RunResult(ret, out, err, time.time() - now) - - def _dump_lines(self, lines, fp): - try: - for line in lines: - print(line, file=fp) - except UnicodeEncodeError: - print("couldn't print to %s because of encoding" % (fp,)) - - def _getpytestargs(self): - return sys.executable, "-mpytest" - - def runpython(self, script): - """Run a python script using sys.executable as interpreter. - - Returns a :py:class:`RunResult`. - - """ - return self.run(sys.executable, script) - - def runpython_c(self, command): - """Run python -c "command", return a :py:class:`RunResult`.""" - return self.run(sys.executable, "-c", command) - - def runpytest_subprocess(self, *args, **kwargs): - """Run pytest as a subprocess with given arguments. - - Any plugins added to the :py:attr:`plugins` list will be added using the - ``-p`` command line option. Additionally ``--basetemp`` is used to put - any temporary files and directories in a numbered directory prefixed - with "runpytest-" to not conflict with the normal numbered pytest - location for temporary files and directories. - - :param args: the sequence of arguments to pass to the pytest subprocess - :param timeout: the period in seconds after which to timeout and raise - :py:class:`Testdir.TimeoutExpired` - - Returns a :py:class:`RunResult`. 
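# Illustrative sketch, not part of the deleted file: the
# runpytest_subprocess() -> RunResult flow described above (runpytest() works
# the same way but in-process). Assumes the `testdir` fixture from the
# "pytester" plugin.
def test_failing_test_is_reported(testdir):
    testdir.makepyfile(
        """
        def test_fails():
            assert 1 == 2
        """
    )
    result = testdir.runpytest_subprocess()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(["*1 failed*"])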
- """ - __tracebackhide__ = True - timeout = kwargs.pop("timeout", None) - raise_on_kwargs(kwargs) - - p = py.path.local.make_numbered_dir( - prefix="runpytest-", keep=None, rootdir=self.tmpdir - ) - args = ("--basetemp=%s" % p,) + args - plugins = [x for x in self.plugins if isinstance(x, str)] - if plugins: - args = ("-p", plugins[0]) + args - args = self._getpytestargs() + args - return self.run(*args, timeout=timeout) - - def spawn_pytest(self, string, expect_timeout=10.0): - """Run pytest using pexpect. - - This makes sure to use the right pytest and sets up the temporary - directory locations. - - The pexpect child is returned. - - """ - basetemp = self.tmpdir.mkdir("temp-pexpect") - invoke = " ".join(map(str, self._getpytestargs())) - cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) - return self.spawn(cmd, expect_timeout=expect_timeout) - - def spawn(self, cmd, expect_timeout=10.0): - """Run a command using pexpect. - - The pexpect child is returned. - - """ - pexpect = pytest.importorskip("pexpect", "3.0") - if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): - pytest.skip("pypy-64 bit not supported") - if sys.platform.startswith("freebsd"): - pytest.xfail("pexpect does not work reliably on freebsd") - logfile = self.tmpdir.join("spawn.out").open("wb") - - # Do not load user config. - env = os.environ.copy() - env.update(self._env_run_update) - - child = pexpect.spawn(cmd, logfile=logfile, env=env) - self.request.addfinalizer(logfile.close) - child.timeout = expect_timeout - return child - - -def getdecoded(out): - try: - return out.decode("utf-8") - except UnicodeDecodeError: - return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (saferepr(out),) - - -class LineComp(object): - def __init__(self): - self.stringio = py.io.TextIO() - - def assert_contains_lines(self, lines2): - """Assert that lines2 are contained (linearly) in lines1. - - Return a list of extralines found. - - """ - __tracebackhide__ = True - val = self.stringio.getvalue() - self.stringio.truncate(0) - self.stringio.seek(0) - lines1 = val.split("\n") - return LineMatcher(lines1).fnmatch_lines(lines2) - - -class LineMatcher(object): - """Flexible matching of text. - - This is a convenience class to test large texts like the output of - commands. - - The constructor takes a list of lines without their trailing newlines, i.e. - ``text.splitlines()``. - - """ - - def __init__(self, lines): - self.lines = lines - self._log_output = [] - - def str(self): - """Return the entire original text.""" - return "\n".join(self.lines) - - def _getlines(self, lines2): - if isinstance(lines2, str): - lines2 = Source(lines2) - if isinstance(lines2, Source): - lines2 = lines2.strip().lines - return lines2 - - def fnmatch_lines_random(self, lines2): - """Check lines exist in the output using in any order. - - Lines are checked using ``fnmatch.fnmatch``. The argument is a list of - lines which have to occur in the output, in any order. - - """ - self._match_lines_random(lines2, fnmatch) - - def re_match_lines_random(self, lines2): - """Check lines exist in the output using ``re.match``, in any order. - - The argument is a list of lines which have to occur in the output, in - any order. - - """ - self._match_lines_random(lines2, lambda name, pat: re.match(pat, name)) - - def _match_lines_random(self, lines2, match_func): - """Check lines exist in the output. - - The argument is a list of lines which have to occur in the output, in - any order. Each line can contain glob whildcards. 
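# Illustrative sketch, not part of the deleted file: LineMatcher used directly
# on a hand-made list of lines, exercising the ordered and any-order matching
# methods documented above.
from _pytest.pytester import LineMatcher

matcher = LineMatcher(["collected 3 items", "test_foo.py ...", "3 passed in 0.12s"])
matcher.fnmatch_lines(["collected * items", "* passed in *"])  # ordered, glob-style
matcher.fnmatch_lines_random(["test_foo.py *"])                # order does not matter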
- - """ - lines2 = self._getlines(lines2) - for line in lines2: - for x in self.lines: - if line == x or match_func(x, line): - self._log("matched: ", repr(line)) - break - else: - self._log("line %r not found in output" % line) - raise ValueError(self._log_text) - - def get_lines_after(self, fnline): - """Return all lines following the given line in the text. - - The given line can contain glob wildcards. - - """ - for i, line in enumerate(self.lines): - if fnline == line or fnmatch(line, fnline): - return self.lines[i + 1 :] - raise ValueError("line %r not found in output" % fnline) - - def _log(self, *args): - self._log_output.append(" ".join(str(x) for x in args)) - - @property - def _log_text(self): - return "\n".join(self._log_output) - - def fnmatch_lines(self, lines2): - """Search captured text for matching lines using ``fnmatch.fnmatch``. - - The argument is a list of lines which have to match and can use glob - wildcards. If they do not match a pytest.fail() is called. The - matches and non-matches are also printed on stdout. - - """ - __tracebackhide__ = True - self._match_lines(lines2, fnmatch, "fnmatch") - - def re_match_lines(self, lines2): - """Search captured text for matching lines using ``re.match``. - - The argument is a list of lines which have to match using ``re.match``. - If they do not match a pytest.fail() is called. - - The matches and non-matches are also printed on stdout. - - """ - __tracebackhide__ = True - self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") - - def _match_lines(self, lines2, match_func, match_nickname): - """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. - - :param list[str] lines2: list of string patterns to match. The actual - format depends on ``match_func`` - :param match_func: a callable ``match_func(line, pattern)`` where line - is the captured line from stdout/stderr and pattern is the matching - pattern - :param str match_nickname: the nickname for the match function that - will be logged to stdout when a match occurs - - """ - assert isinstance(lines2, Sequence) - lines2 = self._getlines(lines2) - lines1 = self.lines[:] - nextline = None - extralines = [] - __tracebackhide__ = True - for line in lines2: - nomatchprinted = False - while lines1: - nextline = lines1.pop(0) - if line == nextline: - self._log("exact match:", repr(line)) - break - elif match_func(nextline, line): - self._log("%s:" % match_nickname, repr(line)) - self._log(" with:", repr(nextline)) - break - else: - if not nomatchprinted: - self._log("nomatch:", repr(line)) - nomatchprinted = True - self._log(" and:", repr(nextline)) - extralines.append(nextline) - else: - self._log("remains unmatched: %r" % (line,)) - pytest.fail(self._log_text) diff --git a/contrib/python/pytest/py2/_pytest/python.py b/contrib/python/pytest/py2/_pytest/python.py deleted file mode 100644 index f7c368b0c4..0000000000 --- a/contrib/python/pytest/py2/_pytest/python.py +++ /dev/null @@ -1,1507 +0,0 @@ -# -*- coding: utf-8 -*- -""" Python test discovery, setup and run of test functions. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import fnmatch -import inspect -import os -import sys -import warnings -from functools import partial -from textwrap import dedent - -import py -import six - -import _pytest -from _pytest import deprecated -from _pytest import fixtures -from _pytest import nodes -from _pytest._code import filter_traceback -from _pytest.compat import ascii_escaped -from _pytest.compat import enum -from _pytest.compat import get_default_arg_names -from _pytest.compat import get_real_func -from _pytest.compat import getfslineno -from _pytest.compat import getimfunc -from _pytest.compat import getlocation -from _pytest.compat import is_generator -from _pytest.compat import isclass -from _pytest.compat import isfunction -from _pytest.compat import NOTSET -from _pytest.compat import REGEX_TYPE -from _pytest.compat import safe_getattr -from _pytest.compat import safe_isclass -from _pytest.compat import safe_str -from _pytest.compat import STRING_TYPES -from _pytest.config import hookimpl -from _pytest.main import FSHookProxy -from _pytest.mark import MARK_GEN -from _pytest.mark.structures import get_unpacked_marks -from _pytest.mark.structures import normalize_mark_list -from _pytest.outcomes import fail -from _pytest.outcomes import skip -from _pytest.pathlib import parts -from _pytest.warning_types import PytestCollectionWarning -from _pytest.warning_types import PytestUnhandledCoroutineWarning - - -def pyobj_property(name): - def get(self): - node = self.getparent(getattr(__import__("pytest"), name)) - if node is not None: - return node.obj - - doc = "python %s object this node was collected from (can be None)." % ( - name.lower(), - ) - return property(get, None, None, doc) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--fixtures", - "--funcargs", - action="store_true", - dest="showfixtures", - default=False, - help="show available fixtures, sorted by plugin appearance " - "(fixtures with leading '_' are only shown with '-v')", - ) - group.addoption( - "--fixtures-per-test", - action="store_true", - dest="show_fixtures_per_test", - default=False, - help="show fixtures per test", - ) - parser.addini( - "python_files", - type="args", - # NOTE: default is also used in AssertionRewritingHook. 
- default=["test_*.py", "*_test.py"], - help="glob-style file patterns for Python test module discovery", - ) - parser.addini( - "python_classes", - type="args", - default=["Test"], - help="prefixes or glob names for Python test class discovery", - ) - parser.addini( - "python_functions", - type="args", - default=["test"], - help="prefixes or glob names for Python test function and method discovery", - ) - parser.addini( - "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", - type="bool", - default=False, - help="disable string escape non-ascii characters, might cause unwanted " - "side effects(use at your own risk)", - ) - - group.addoption( - "--import-mode", - default="prepend", - choices=["prepend", "append"], - dest="importmode", - help="prepend/append to sys.path when importing test modules, " - "default is to prepend.", - ) - - -def pytest_cmdline_main(config): - if config.option.showfixtures: - showfixtures(config) - return 0 - if config.option.show_fixtures_per_test: - show_fixtures_per_test(config) - return 0 - - -def pytest_generate_tests(metafunc): - # those alternative spellings are common - raise a specific error to alert - # the user - alt_spellings = ["parameterize", "parametrise", "parameterise"] - for mark_name in alt_spellings: - if metafunc.definition.get_closest_marker(mark_name): - msg = "{0} has '{1}' mark, spelling should be 'parametrize'" - fail(msg.format(metafunc.function.__name__, mark_name), pytrace=False) - for marker in metafunc.definition.iter_markers(name="parametrize"): - metafunc.parametrize(*marker.args, **marker.kwargs) - - -def pytest_configure(config): - config.addinivalue_line( - "markers", - "parametrize(argnames, argvalues): call a test function multiple " - "times passing in different arguments in turn. argvalues generally " - "needs to be a list of values if argnames specifies only one name " - "or a list of tuples of values if argnames specifies multiple names. " - "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " - "decorated test function, one with arg1=1 and another with arg1=2." - "see https://docs.pytest.org/en/latest/parametrize.html for more info " - "and examples.", - ) - config.addinivalue_line( - "markers", - "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " - "all of the specified fixtures. 
see " - "https://docs.pytest.org/en/latest/fixture.html#usefixtures ", - ) - - -@hookimpl(trylast=True) -def pytest_pyfunc_call(pyfuncitem): - testfunction = pyfuncitem.obj - iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None) - if iscoroutinefunction is not None and iscoroutinefunction(testfunction): - msg = "Coroutine functions are not natively supported and have been skipped.\n" - msg += "You need to install a suitable plugin for your async framework, for example:\n" - msg += " - pytest-asyncio\n" - msg += " - pytest-trio\n" - msg += " - pytest-tornasync" - warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid))) - skip(msg="coroutine function and no async plugin installed (see warnings)") - funcargs = pyfuncitem.funcargs - testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} - testfunction(**testargs) - return True - - -def pytest_collect_file(path, parent): - ext = path.ext - if ext == ".py": - if not parent.session.isinitpath(path): - if not path_matches_patterns( - path, parent.config.getini("python_files") + ["__init__.py"] - ): - return - ihook = parent.session.gethookproxy(path) - return ihook.pytest_pycollect_makemodule(path=path, parent=parent) - - -def path_matches_patterns(path, patterns): - """Returns True if the given py.path.local matches one of the patterns in the list of globs given""" - return any(path.fnmatch(pattern) for pattern in patterns) - - -def pytest_pycollect_makemodule(path, parent): - if path.basename == "__init__.py": - return Package(path, parent) - return Module(path, parent) - - -@hookimpl(hookwrapper=True) -def pytest_pycollect_makeitem(collector, name, obj): - outcome = yield - res = outcome.get_result() - if res is not None: - return - # nothing was collected elsewhere, let's do it here - if safe_isclass(obj): - if collector.istestclass(obj, name): - outcome.force_result(Class(name, parent=collector)) - elif collector.istestfunction(obj, name): - # mock seems to store unbound methods (issue473), normalize it - obj = getattr(obj, "__func__", obj) - # We need to try and unwrap the function if it's a functools.partial - # or a funtools.wrapped. - # We musn't if it's been wrapped with mock.patch (python 2 only) - if not (isfunction(obj) or isfunction(get_real_func(obj))): - filename, lineno = getfslineno(obj) - warnings.warn_explicit( - message=PytestCollectionWarning( - "cannot collect %r because it is not a function." 
% name - ), - category=None, - filename=str(filename), - lineno=lineno + 1, - ) - elif getattr(obj, "__test__", True): - if is_generator(obj): - res = Function(name, parent=collector) - reason = deprecated.YIELD_TESTS.format(name=name) - res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) - res.warn(PytestCollectionWarning(reason)) - else: - res = list(collector._genfunctions(name, obj)) - outcome.force_result(res) - - -def pytest_make_parametrize_id(config, val, argname=None): - return None - - -class PyobjContext(object): - module = pyobj_property("Module") - cls = pyobj_property("Class") - instance = pyobj_property("Instance") - - -class PyobjMixin(PyobjContext): - _ALLOW_MARKERS = True - - def __init__(self, *k, **kw): - super(PyobjMixin, self).__init__(*k, **kw) - - @property - def obj(self): - """Underlying Python object.""" - obj = getattr(self, "_obj", None) - if obj is None: - self._obj = obj = self._getobj() - # XXX evil hack - # used to avoid Instance collector marker duplication - if self._ALLOW_MARKERS: - self.own_markers.extend(get_unpacked_marks(self.obj)) - return obj - - @obj.setter - def obj(self, value): - self._obj = value - - def _getobj(self): - """Gets the underlying Python object. May be overwritten by subclasses.""" - return getattr(self.parent.obj, self.name) - - def getmodpath(self, stopatmodule=True, includemodule=False): - """ return python path relative to the containing module. """ - chain = self.listchain() - chain.reverse() - parts = [] - for node in chain: - if isinstance(node, Instance): - continue - name = node.name - if isinstance(node, Module): - name = os.path.splitext(name)[0] - if stopatmodule: - if includemodule: - parts.append(name) - break - parts.append(name) - parts.reverse() - s = ".".join(parts) - return s.replace(".[", "[") - - def reportinfo(self): - # XXX caching? - obj = self.obj - compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) - if isinstance(compat_co_firstlineno, int): - # nose compatibility - fspath = sys.modules[obj.__module__].__file__ - if fspath.endswith(".pyc"): - fspath = fspath[:-1] - lineno = compat_co_firstlineno - else: - fspath, lineno = getfslineno(obj) - modpath = self.getmodpath() - assert isinstance(lineno, int) - return fspath, lineno, modpath - - -class PyCollector(PyobjMixin, nodes.Collector): - def funcnamefilter(self, name): - return self._matches_prefix_or_glob_option("python_functions", name) - - def isnosetest(self, obj): - """ Look for the __test__ attribute, which is applied by the - @nose.tools.istest decorator - """ - # We explicitly check for "is True" here to not mistakenly treat - # classes with a custom __getattr__ returning something truthy (like a - # function) as test classes. - return safe_getattr(obj, "__test__", False) is True - - def classnamefilter(self, name): - return self._matches_prefix_or_glob_option("python_classes", name) - - def istestfunction(self, obj, name): - if self.funcnamefilter(name) or self.isnosetest(obj): - if isinstance(obj, staticmethod): - # static methods need to be unwrapped - obj = safe_getattr(obj, "__func__", False) - return ( - safe_getattr(obj, "__call__", False) - and fixtures.getfixturemarker(obj) is None - ) - else: - return False - - def istestclass(self, obj, name): - return self.classnamefilter(name) or self.isnosetest(obj) - - def _matches_prefix_or_glob_option(self, option_name, name): - """ - checks if the given name matches the prefix or glob-pattern defined - in ini configuration. 
- """ - for option in self.config.getini(option_name): - if name.startswith(option): - return True - # check that name looks like a glob-string before calling fnmatch - # because this is called for every name in each collected module, - # and fnmatch is somewhat expensive to call - elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( - name, option - ): - return True - return False - - def collect(self): - if not getattr(self.obj, "__test__", True): - return [] - - # NB. we avoid random getattrs and peek in the __dict__ instead - # (XXX originally introduced from a PyPy need, still true?) - dicts = [getattr(self.obj, "__dict__", {})] - for basecls in inspect.getmro(self.obj.__class__): - dicts.append(basecls.__dict__) - seen = {} - values = [] - for dic in dicts: - for name, obj in list(dic.items()): - if name in seen: - continue - seen[name] = True - res = self._makeitem(name, obj) - if res is None: - continue - if not isinstance(res, list): - res = [res] - values.extend(res) - values.sort(key=lambda item: item.reportinfo()[:2]) - return values - - def _makeitem(self, name, obj): - # assert self.ihook.fspath == self.fspath, self - return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) - - def _genfunctions(self, name, funcobj): - module = self.getparent(Module).obj - clscol = self.getparent(Class) - cls = clscol and clscol.obj or None - fm = self.session._fixturemanager - - definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) - fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) - - metafunc = Metafunc( - definition, fixtureinfo, self.config, cls=cls, module=module - ) - methods = [] - if hasattr(module, "pytest_generate_tests"): - methods.append(module.pytest_generate_tests) - if hasattr(cls, "pytest_generate_tests"): - methods.append(cls().pytest_generate_tests) - if methods: - self.ihook.pytest_generate_tests.call_extra( - methods, dict(metafunc=metafunc) - ) - else: - self.ihook.pytest_generate_tests(metafunc=metafunc) - - if not metafunc._calls: - yield Function(name, parent=self, fixtureinfo=fixtureinfo) - else: - # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs - fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) - - # add_funcarg_pseudo_fixture_def may have shadowed some fixtures - # with direct parametrization, so make sure we update what the - # function really needs. - fixtureinfo.prune_dependency_tree() - - for callspec in metafunc._calls: - subname = "%s[%s]" % (name, callspec.id) - yield Function( - name=subname, - parent=self, - callspec=callspec, - callobj=funcobj, - fixtureinfo=fixtureinfo, - keywords={callspec.id: True}, - originalname=name, - ) - - -class Module(nodes.File, PyCollector): - """ Collector for test classes and functions. """ - - def _getobj(self): - return self._importtestmodule() - - def collect(self): - self._inject_setup_module_fixture() - self._inject_setup_function_fixture() - self.session._fixturemanager.parsefactories(self) - return super(Module, self).collect() - - def _inject_setup_module_fixture(self): - """Injects a hidden autouse, module scoped fixture into the collected module object - that invokes setUpModule/tearDownModule if either or both are available. - - Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with - other fixtures (#517). 
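# Illustrative sketch, not part of the deleted file: the module-level
# xunit-style functions that the hidden autouse fixture above invokes around
# every test in the module.
_resource = {}

def setup_module(module):
    _resource["db"] = "connected"

def teardown_module(module):
    _resource.clear()

def test_uses_module_resource():
    assert _resource["db"] == "connected"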
- """ - setup_module = _get_non_fixture_func(self.obj, "setUpModule") - if setup_module is None: - setup_module = _get_non_fixture_func(self.obj, "setup_module") - - teardown_module = _get_non_fixture_func(self.obj, "tearDownModule") - if teardown_module is None: - teardown_module = _get_non_fixture_func(self.obj, "teardown_module") - - if setup_module is None and teardown_module is None: - return - - @fixtures.fixture(autouse=True, scope="module") - def xunit_setup_module_fixture(request): - if setup_module is not None: - _call_with_optional_argument(setup_module, request.module) - yield - if teardown_module is not None: - _call_with_optional_argument(teardown_module, request.module) - - self.obj.__pytest_setup_module = xunit_setup_module_fixture - - def _inject_setup_function_fixture(self): - """Injects a hidden autouse, function scoped fixture into the collected module object - that invokes setup_function/teardown_function if either or both are available. - - Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with - other fixtures (#517). - """ - setup_function = _get_non_fixture_func(self.obj, "setup_function") - teardown_function = _get_non_fixture_func(self.obj, "teardown_function") - if setup_function is None and teardown_function is None: - return - - @fixtures.fixture(autouse=True, scope="function") - def xunit_setup_function_fixture(request): - if request.instance is not None: - # in this case we are bound to an instance, so we need to let - # setup_method handle this - yield - return - if setup_function is not None: - _call_with_optional_argument(setup_function, request.function) - yield - if teardown_function is not None: - _call_with_optional_argument(teardown_function, request.function) - - self.obj.__pytest_setup_function = xunit_setup_function_fixture - - def _importtestmodule(self): - # we assume we are only called once per module - importmode = self.config.getoption("--import-mode") - try: - mod = self.fspath.pyimport(ensuresyspath=importmode) - except SyntaxError: - raise self.CollectError( - _pytest._code.ExceptionInfo.from_current().getrepr(style="short") - ) - except self.fspath.ImportMismatchError: - e = sys.exc_info()[1] - raise self.CollectError( - "import file mismatch:\n" - "imported module %r has this __file__ attribute:\n" - " %s\n" - "which is not the same as the test file we want to collect:\n" - " %s\n" - "HINT: remove __pycache__ / .pyc files and/or use a " - "unique basename for your test file modules" % e.args - ) - except ImportError: - from _pytest._code.code import ExceptionInfo - - exc_info = ExceptionInfo.from_current() - if self.config.getoption("verbose") < 2: - exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = ( - exc_info.getrepr(style="short") - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = safe_str(exc_repr) - raise self.CollectError( - "ImportError while importing test module '{fspath}'.\n" - "Hint: make sure your test modules/packages have valid Python names.\n" - "Traceback:\n" - "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) - ) - except _pytest.runner.Skipped as e: - if e.allow_module_level: - raise - raise self.CollectError( - "Using pytest.skip outside of a test is not allowed. " - "To decorate a test function, use the @pytest.mark.skip " - "or @pytest.mark.skipif decorators instead, and to skip a " - "module use `pytestmark = pytest.mark.{skip,skipif}." 
- ) - self.config.pluginmanager.consider_module(mod) - return mod - - -class Package(Module): - def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): - session = parent.session - nodes.FSCollector.__init__( - self, fspath, parent=parent, config=config, session=session, nodeid=nodeid - ) - self.name = fspath.dirname - self.trace = session.trace - self._norecursepatterns = session._norecursepatterns - self.fspath = fspath - - def setup(self): - # not using fixtures to call setup_module here because autouse fixtures - # from packages are not called automatically (#4085) - setup_module = _get_non_fixture_func(self.obj, "setUpModule") - if setup_module is None: - setup_module = _get_non_fixture_func(self.obj, "setup_module") - if setup_module is not None: - _call_with_optional_argument(setup_module, self.obj) - - teardown_module = _get_non_fixture_func(self.obj, "tearDownModule") - if teardown_module is None: - teardown_module = _get_non_fixture_func(self.obj, "teardown_module") - if teardown_module is not None: - func = partial(_call_with_optional_argument, teardown_module, self.obj) - self.addfinalizer(func) - - def _recurse(self, dirpath): - if dirpath.basename == "__pycache__": - return False - ihook = self.gethookproxy(dirpath.dirpath()) - if ihook.pytest_ignore_collect(path=dirpath, config=self.config): - return - for pat in self._norecursepatterns: - if dirpath.check(fnmatch=pat): - return False - ihook = self.gethookproxy(dirpath) - ihook.pytest_collect_directory(path=dirpath, parent=self) - return True - - def gethookproxy(self, fspath): - # check if we have the common case of running - # hooks with all conftest.py filesall conftest.py - pm = self.config.pluginmanager - my_conftestmodules = pm._getconftestmodules(fspath) - remove_mods = pm._conftest_plugins.difference(my_conftestmodules) - if remove_mods: - # one or more conftests are not in use at this fspath - proxy = FSHookProxy(fspath, pm, remove_mods) - else: - # all plugis are active for this fspath - proxy = self.config.hook - return proxy - - def _collectfile(self, path, handle_dupes=True): - assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % ( - path, - path.isdir(), - path.exists(), - path.islink(), - ) - ihook = self.gethookproxy(path) - if not self.isinitpath(path): - if ihook.pytest_ignore_collect(path=path, config=self.config): - return () - - if handle_dupes: - keepduplicates = self.config.getoption("keepduplicates") - if not keepduplicates: - duplicate_paths = self.config.pluginmanager._duplicatepaths - if path in duplicate_paths: - return () - else: - duplicate_paths.add(path) - - if self.fspath == path: # __init__.py - return [self] - - return ihook.pytest_collect_file(path=path, parent=self) - - def isinitpath(self, path): - return path in self.session._initialpaths - - def collect(self): - this_path = self.fspath.dirpath() - init_module = this_path.join("__init__.py") - if init_module.check(file=1) and path_matches_patterns( - init_module, self.config.getini("python_files") - ): - yield Module(init_module, self) - pkg_prefixes = set() - for path in this_path.visit(rec=self._recurse, bf=True, sort=True): - # We will visit our own __init__.py file, in which case we skip it. 
- is_file = path.isfile() - if is_file: - if path.basename == "__init__.py" and path.dirpath() == this_path: - continue - - parts_ = parts(path.strpath) - if any( - pkg_prefix in parts_ and pkg_prefix.join("__init__.py") != path - for pkg_prefix in pkg_prefixes - ): - continue - - if is_file: - for x in self._collectfile(path): - yield x - elif not path.isdir(): - # Broken symlink or invalid/missing file. - continue - elif path.join("__init__.py").check(file=1): - pkg_prefixes.add(path) - - -def _get_xunit_setup_teardown(holder, attr_name, param_obj=None): - """ - Return a callable to perform xunit-style setup or teardown if - the function exists in the ``holder`` object. - The ``param_obj`` parameter is the parameter which will be passed to the function - when the callable is called without arguments, defaults to the ``holder`` object. - Return ``None`` if a suitable callable is not found. - """ - # TODO: only needed because of Package! - param_obj = param_obj if param_obj is not None else holder - result = _get_non_fixture_func(holder, attr_name) - if result is not None: - arg_count = result.__code__.co_argcount - if inspect.ismethod(result): - arg_count -= 1 - if arg_count: - return lambda: result(param_obj) - else: - return result - - -def _call_with_optional_argument(func, arg): - """Call the given function with the given argument if func accepts one argument, otherwise - calls func without arguments""" - arg_count = func.__code__.co_argcount - if inspect.ismethod(func): - arg_count -= 1 - if arg_count: - func(arg) - else: - func() - - -def _get_non_fixture_func(obj, name): - """Return the attribute from the given object to be used as a setup/teardown - xunit-style function, but only if not marked as a fixture to - avoid calling it twice. - """ - meth = getattr(obj, name, None) - if fixtures.getfixturemarker(meth) is None: - return meth - - -class Class(PyCollector): - """ Collector for test methods. """ - - def collect(self): - if not safe_getattr(self.obj, "__test__", True): - return [] - if hasinit(self.obj): - self.warn( - PytestCollectionWarning( - "cannot collect test class %r because it has a " - "__init__ constructor (from: %s)" - % (self.obj.__name__, self.parent.nodeid) - ) - ) - return [] - elif hasnew(self.obj): - self.warn( - PytestCollectionWarning( - "cannot collect test class %r because it has a " - "__new__ constructor (from: %s)" - % (self.obj.__name__, self.parent.nodeid) - ) - ) - return [] - - self._inject_setup_class_fixture() - self._inject_setup_method_fixture() - - return [Instance(name="()", parent=self)] - - def _inject_setup_class_fixture(self): - """Injects a hidden autouse, class scoped fixture into the collected class object - that invokes setup_class/teardown_class if either or both are available. - - Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with - other fixtures (#517). 
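# Illustrative sketch, not part of the deleted file: the class-level
# xunit-style methods wired up by the hidden fixtures above (setup_class runs
# once per class, setup_method before every test method).
class TestAccount(object):
    @classmethod
    def setup_class(cls):
        cls.calls = []

    def setup_method(self, method):
        self.calls.append(method.__name__)

    def test_setup_method_saw_this_test(self):
        assert self.calls[-1] == "test_setup_method_saw_this_test"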
- """ - setup_class = _get_non_fixture_func(self.obj, "setup_class") - teardown_class = getattr(self.obj, "teardown_class", None) - if setup_class is None and teardown_class is None: - return - - @fixtures.fixture(autouse=True, scope="class") - def xunit_setup_class_fixture(cls): - if setup_class is not None: - func = getimfunc(setup_class) - _call_with_optional_argument(func, self.obj) - yield - if teardown_class is not None: - func = getimfunc(teardown_class) - _call_with_optional_argument(func, self.obj) - - self.obj.__pytest_setup_class = xunit_setup_class_fixture - - def _inject_setup_method_fixture(self): - """Injects a hidden autouse, function scoped fixture into the collected class object - that invokes setup_method/teardown_method if either or both are available. - - Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with - other fixtures (#517). - """ - setup_method = _get_non_fixture_func(self.obj, "setup_method") - teardown_method = getattr(self.obj, "teardown_method", None) - if setup_method is None and teardown_method is None: - return - - @fixtures.fixture(autouse=True, scope="function") - def xunit_setup_method_fixture(self, request): - method = request.function - if setup_method is not None: - func = getattr(self, "setup_method") - _call_with_optional_argument(func, method) - yield - if teardown_method is not None: - func = getattr(self, "teardown_method") - _call_with_optional_argument(func, method) - - self.obj.__pytest_setup_method = xunit_setup_method_fixture - - -class Instance(PyCollector): - _ALLOW_MARKERS = False # hack, destroy later - # instances share the object with their parents in a way - # that duplicates markers instances if not taken out - # can be removed at node structure reorganization time - - def _getobj(self): - return self.parent.obj() - - def collect(self): - self.session._fixturemanager.parsefactories(self) - return super(Instance, self).collect() - - def newinstance(self): - self.obj = self._getobj() - return self.obj - - -class FunctionMixin(PyobjMixin): - """ mixin for the code common to Function and Generator. - """ - - def setup(self): - """ perform setup for this test function. 
""" - if isinstance(self.parent, Instance): - self.parent.newinstance() - self.obj = self._getobj() - - def _prunetraceback(self, excinfo): - if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): - code = _pytest._code.Code(get_real_func(self.obj)) - path, firstlineno = code.path, code.firstlineno - traceback = excinfo.traceback - ntraceback = traceback.cut(path=path, firstlineno=firstlineno) - if ntraceback == traceback: - ntraceback = ntraceback.cut(path=path) - if ntraceback == traceback: - ntraceback = ntraceback.filter(filter_traceback) - if not ntraceback: - ntraceback = traceback - - excinfo.traceback = ntraceback.filter() - # issue364: mark all but first and last frames to - # only show a single-line message for each frame - if self.config.getoption("tbstyle", "auto") == "auto": - if len(excinfo.traceback) > 2: - for entry in excinfo.traceback[1:-1]: - entry.set_repr_style("short") - - def repr_failure(self, excinfo, outerr=None): - assert outerr is None, "XXX outerr usage is deprecated" - style = self.config.getoption("tbstyle", "auto") - if style == "auto": - style = "long" - return self._repr_failure_py(excinfo, style=style) - - -def hasinit(obj): - init = getattr(obj, "__init__", None) - if init: - return init != object.__init__ - - -def hasnew(obj): - new = getattr(obj, "__new__", None) - if new: - return new != object.__new__ - - -class CallSpec2(object): - def __init__(self, metafunc): - self.metafunc = metafunc - self.funcargs = {} - self._idlist = [] - self.params = {} - self._globalid = NOTSET - self._globalparam = NOTSET - self._arg2scopenum = {} # used for sorting parametrized resources - self.marks = [] - self.indices = {} - - def copy(self): - cs = CallSpec2(self.metafunc) - cs.funcargs.update(self.funcargs) - cs.params.update(self.params) - cs.marks.extend(self.marks) - cs.indices.update(self.indices) - cs._arg2scopenum.update(self._arg2scopenum) - cs._idlist = list(self._idlist) - cs._globalid = self._globalid - cs._globalparam = self._globalparam - return cs - - def _checkargnotcontained(self, arg): - if arg in self.params or arg in self.funcargs: - raise ValueError("duplicate %r" % (arg,)) - - def getparam(self, name): - try: - return self.params[name] - except KeyError: - if self._globalparam is NOTSET: - raise ValueError(name) - return self._globalparam - - @property - def id(self): - return "-".join(map(safe_str, self._idlist)) - - def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): - for arg, val in zip(argnames, valset): - self._checkargnotcontained(arg) - valtype_for_arg = valtypes[arg] - getattr(self, valtype_for_arg)[arg] = val - self.indices[arg] = param_index - self._arg2scopenum[arg] = scopenum - self._idlist.append(id) - self.marks.extend(normalize_mark_list(marks)) - - def setall(self, funcargs, id, param): - for x in funcargs: - self._checkargnotcontained(x) - self.funcargs.update(funcargs) - if id is not NOTSET: - self._idlist.append(id) - if param is not NOTSET: - assert self._globalparam is NOTSET - self._globalparam = param - for arg in funcargs: - self._arg2scopenum[arg] = fixtures.scopenum_function - - -class Metafunc(fixtures.FuncargnamesCompatAttr): - """ - Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. - They help to inspect a test function and to generate tests according to - test configuration or values specified in the class or module where a - test function is defined. 
- """ - - def __init__(self, definition, fixtureinfo, config, cls=None, module=None): - assert ( - isinstance(definition, FunctionDefinition) - or type(definition).__name__ == "DefinitionMock" - ) - self.definition = definition - - #: access to the :class:`_pytest.config.Config` object for the test session - self.config = config - - #: the module object where the test function is defined in. - self.module = module - - #: underlying python test function - self.function = definition.obj - - #: set of fixture names required by the test function - self.fixturenames = fixtureinfo.names_closure - - #: class object where the test function is defined in or ``None``. - self.cls = cls - - self._calls = [] - self._ids = set() - self._arg2fixturedefs = fixtureinfo.name2fixturedefs - - def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): - """ Add new invocations to the underlying test function using the list - of argvalues for the given argnames. Parametrization is performed - during the collection phase. If you need to setup expensive resources - see about setting indirect to do it rather at test setup time. - - :arg argnames: a comma-separated string denoting one or more argument - names, or a list/tuple of argument strings. - - :arg argvalues: The list of argvalues determines how often a - test is invoked with different argument values. If only one - argname was specified argvalues is a list of values. If N - argnames were specified, argvalues must be a list of N-tuples, - where each tuple-element specifies a value for its respective - argname. - - :arg indirect: The list of argnames or boolean. A list of arguments' - names (subset of argnames). If True the list contains all names from - the argnames. Each argvalue corresponding to an argname in this list will - be passed as request.param to its respective argname fixture - function so that it can perform more expensive setups during the - setup phase of a test rather than at collection time. - - :arg ids: list of string ids, or a callable. - If strings, each is corresponding to the argvalues so that they are - part of the test id. If None is given as id of specific test, the - automatically generated id for that argument will be used. - If callable, it should take one argument (a single argvalue) and return - a string or return None. If None, the automatically generated id for that - argument will be used. - If no ids are provided they will be generated automatically from - the argvalues. - - :arg scope: if specified it denotes the scope of the parameters. - The scope is used for grouping tests by parameter instances. - It will also override any fixture-function defined scope, allowing - to set a dynamic scope using test context or configuration. 
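# Illustrative sketch, not part of the deleted file: a conftest.py-style
# pytest_generate_tests hook driving Metafunc.parametrize(), the API whose
# docstring appears above.
def pytest_generate_tests(metafunc):
    if "backend" in metafunc.fixturenames:
        metafunc.parametrize("backend", ["sqlite", "postgres"], ids=["lite", "pg"])

def test_backend_name(backend):
    assert backend in ("sqlite", "postgres")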
- """ - from _pytest.fixtures import scope2index - from _pytest.mark import ParameterSet - - argnames, parameters = ParameterSet._for_parametrize( - argnames, - argvalues, - self.function, - self.config, - function_definition=self.definition, - ) - del argvalues - - if scope is None: - scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) - - self._validate_if_using_arg_names(argnames, indirect) - - arg_values_types = self._resolve_arg_value_types(argnames, indirect) - - ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition) - - scopenum = scope2index( - scope, descr="parametrize() call in {}".format(self.function.__name__) - ) - - # create the new calls: if we are parametrize() multiple times (by applying the decorator - # more than once) then we accumulate those calls generating the cartesian product - # of all calls - newcalls = [] - for callspec in self._calls or [CallSpec2(self)]: - for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): - newcallspec = callspec.copy() - newcallspec.setmulti2( - arg_values_types, - argnames, - param_set.values, - param_id, - param_set.marks, - scopenum, - param_index, - ) - newcalls.append(newcallspec) - self._calls = newcalls - - def _resolve_arg_ids(self, argnames, ids, parameters, item): - """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given - to ``parametrize``. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param ids: the ids parameter of the parametrized call (see docs). - :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``. - :param Item item: the item that generated this parametrized call. - :rtype: List[str] - :return: the list of ids for each argname given - """ - from _pytest._io.saferepr import saferepr - - idfn = None - if callable(ids): - idfn = ids - ids = None - if ids: - func_name = self.function.__name__ - if len(ids) != len(parameters): - msg = "In {}: {} parameter sets specified, with different number of ids: {}" - fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False) - for id_value in ids: - if id_value is not None and not isinstance(id_value, six.string_types): - msg = "In {}: ids must be list of strings, found: {} (type: {!r})" - fail( - msg.format(func_name, saferepr(id_value), type(id_value)), - pytrace=False, - ) - ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item) - return ids - - def _resolve_arg_value_types(self, argnames, indirect): - """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg" - to the function, based on the ``indirect`` parameter of the parametrized() call. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param indirect: same ``indirect`` parameter of ``parametrize()``. - :rtype: Dict[str, str] - A dict mapping each arg name to either: - * "params" if the argname should be the parameter of a fixture of the same name. - * "funcargs" if the argname should be a parameter to the parametrized test function. 
- """ - valtypes = {} - if indirect is True: - valtypes = dict.fromkeys(argnames, "params") - elif indirect is False: - valtypes = dict.fromkeys(argnames, "funcargs") - elif isinstance(indirect, (tuple, list)): - valtypes = dict.fromkeys(argnames, "funcargs") - for arg in indirect: - if arg not in argnames: - fail( - "In {}: indirect fixture '{}' doesn't exist".format( - self.function.__name__, arg - ), - pytrace=False, - ) - valtypes[arg] = "params" - return valtypes - - def _validate_if_using_arg_names(self, argnames, indirect): - """ - Check if all argnames are being used, by default values, or directly/indirectly. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param indirect: same ``indirect`` parameter of ``parametrize()``. - :raise ValueError: if validation fails. - """ - default_arg_names = set(get_default_arg_names(self.function)) - func_name = self.function.__name__ - for arg in argnames: - if arg not in self.fixturenames: - if arg in default_arg_names: - fail( - "In {}: function already takes an argument '{}' with a default value".format( - func_name, arg - ), - pytrace=False, - ) - else: - if isinstance(indirect, (tuple, list)): - name = "fixture" if arg in indirect else "argument" - else: - name = "fixture" if indirect else "argument" - fail( - "In {}: function uses no {} '{}'".format(func_name, name, arg), - pytrace=False, - ) - - -def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): - """Find the most appropriate scope for a parametrized call based on its arguments. - - When there's at least one direct argument, always use "function" scope. - - When a test function is parametrized and all its arguments are indirect - (e.g. fixtures), return the most narrow scope based on the fixtures used. - - Related to issue #1832, based on code posted by @Kingdread. 
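# Illustrative sketch, not part of the deleted file: indirect parametrization,
# the all-arguments-are-fixtures case handled by _find_parametrized_scope()
# above; the module-scoped fixture receives each value as request.param.
import pytest

@pytest.fixture(scope="module")
def dsn(request):
    return "driver://{}".format(request.param)  # expensive setup would go here

@pytest.mark.parametrize("dsn", ["sqlite", "postgres"], indirect=True)
def test_dsn_is_built(dsn):
    assert dsn.startswith("driver://")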
- """ - from _pytest.fixtures import scopes - - if isinstance(indirect, (list, tuple)): - all_arguments_are_fixtures = len(indirect) == len(argnames) - else: - all_arguments_are_fixtures = bool(indirect) - - if all_arguments_are_fixtures: - fixturedefs = arg2fixturedefs or {} - used_scopes = [ - fixturedef[0].scope - for name, fixturedef in fixturedefs.items() - if name in argnames - ] - if used_scopes: - # Takes the most narrow scope from used fixtures - for scope in reversed(scopes): - if scope in used_scopes: - return scope - - return "function" - - -def _ascii_escaped_by_config(val, config): - if config is None: - escape_option = False - else: - escape_option = config.getini( - "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" - ) - return val if escape_option else ascii_escaped(val) - - -def _idval(val, argname, idx, idfn, item, config): - if idfn: - try: - generated_id = idfn(val) - if generated_id is not None: - val = generated_id - except Exception as e: - # See issue https://github.com/pytest-dev/pytest/issues/2169 - msg = "{}: error raised while trying to determine id of parameter '{}' at position {}\n" - msg = msg.format(item.nodeid, argname, idx) - # we only append the exception type and message because on Python 2 reraise does nothing - msg += " {}: {}\n".format(type(e).__name__, e) - six.raise_from(ValueError(msg), e) - elif config: - hook_id = config.hook.pytest_make_parametrize_id( - config=config, val=val, argname=argname - ) - if hook_id: - return hook_id - - if isinstance(val, STRING_TYPES): - return _ascii_escaped_by_config(val, config) - elif val is None or isinstance(val, (float, int, bool)): - return str(val) - elif isinstance(val, REGEX_TYPE): - return ascii_escaped(val.pattern) - elif enum is not None and isinstance(val, enum.Enum): - return str(val) - elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): - return val.__name__ - return str(argname) + str(idx) - - -def limit_idval(limit): - import functools - - names = {} - limit -= 6 - assert limit > 0 - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kw): - idval = func(*args, **kw) - if len(idval) > limit: - prefix = idval[:limit] - # There might be same prefix for the different test cases - take item into account - name = "{}-{}".format(kw.get('item', ''), safe_str(prefix)) - idx = names.setdefault(name, -1) + 1 - names[name] = idx - idval = "{}-{}".format(safe_str(prefix), idx) - return idval - - return wrapper - - return decorator - - -# XXX limit testnames in the name of sanity and readability -@limit_idval(limit=500) -def _idvalset(idx, parameterset, argnames, idfn, ids, item, config): - if parameterset.id is not None: - return parameterset.id - if ids is None or (idx >= len(ids) or ids[idx] is None): - this_id = [ - _idval(val, argname, idx, idfn, item=item, config=config) - for val, argname in zip(parameterset.values, argnames) - ] - return "-".join(this_id) - else: - return _ascii_escaped_by_config(ids[idx], config) - - -def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None): - ids = [ - _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item) - for valindex, parameterset in enumerate(parametersets) - ] - if len(set(ids)) != len(ids): - # The ids are not unique - duplicates = [testid for testid in ids if ids.count(testid) > 1] - counters = collections.defaultdict(lambda: 0) - for index, testid in enumerate(ids): - if testid in duplicates: - ids[index] = testid + str(counters[testid]) - 
counters[testid] += 1 - return ids - - -def show_fixtures_per_test(config): - from _pytest.main import wrap_session - - return wrap_session(config, _show_fixtures_per_test) - - -def _show_fixtures_per_test(config, session): - import _pytest.config - - session.perform_collect() - curdir = py.path.local() - tw = _pytest.config.create_terminal_writer(config) - verbose = config.getvalue("verbose") - - def get_best_relpath(func): - loc = getlocation(func, curdir) - return curdir.bestrelpath(loc) - - def write_fixture(fixture_def): - argname = fixture_def.argname - if verbose <= 0 and argname.startswith("_"): - return - if verbose > 0: - bestrel = get_best_relpath(fixture_def.func) - funcargspec = "{} -- {}".format(argname, bestrel) - else: - funcargspec = argname - tw.line(funcargspec, green=True) - fixture_doc = fixture_def.func.__doc__ - if fixture_doc: - write_docstring(tw, fixture_doc) - else: - tw.line(" no docstring available", red=True) - - def write_item(item): - try: - info = item._fixtureinfo - except AttributeError: - # doctests items have no _fixtureinfo attribute - return - if not info.name2fixturedefs: - # this test item does not use any fixtures - return - tw.line() - tw.sep("-", "fixtures used by {}".format(item.name)) - tw.sep("-", "({})".format(get_best_relpath(item.function))) - # dict key not used in loop but needed for sorting - for _, fixturedefs in sorted(info.name2fixturedefs.items()): - assert fixturedefs is not None - if not fixturedefs: - continue - # last item is expected to be the one used by the test item - write_fixture(fixturedefs[-1]) - - for session_item in session.items: - write_item(session_item) - - -def showfixtures(config): - from _pytest.main import wrap_session - - return wrap_session(config, _showfixtures_main) - - -def _showfixtures_main(config, session): - import _pytest.config - - session.perform_collect() - curdir = py.path.local() - tw = _pytest.config.create_terminal_writer(config) - verbose = config.getvalue("verbose") - - fm = session._fixturemanager - - available = [] - seen = set() - - for argname, fixturedefs in fm._arg2fixturedefs.items(): - assert fixturedefs is not None - if not fixturedefs: - continue - for fixturedef in fixturedefs: - loc = getlocation(fixturedef.func, curdir) - if (fixturedef.argname, loc) in seen: - continue - seen.add((fixturedef.argname, loc)) - available.append( - ( - len(fixturedef.baseid), - fixturedef.func.__module__, - curdir.bestrelpath(loc), - fixturedef.argname, - fixturedef, - ) - ) - - available.sort() - currentmodule = None - for baseid, module, bestrel, argname, fixturedef in available: - if currentmodule != module: - if not module.startswith("_pytest."): - tw.line() - tw.sep("-", "fixtures defined from %s" % (module,)) - currentmodule = module - if verbose <= 0 and argname[0] == "_": - continue - tw.write(argname, green=True) - if fixturedef.scope != "function": - tw.write(" [%s scope]" % fixturedef.scope, cyan=True) - if verbose > 0: - tw.write(" -- %s" % bestrel, yellow=True) - tw.write("\n") - loc = getlocation(fixturedef.func, curdir) - doc = fixturedef.func.__doc__ or "" - if doc: - write_docstring(tw, doc) - else: - tw.line(" %s: no docstring available" % (loc,), red=True) - tw.line() - - -def write_docstring(tw, doc, indent=" "): - doc = doc.rstrip() - if "\n" in doc: - firstline, rest = doc.split("\n", 1) - else: - firstline, rest = doc, "" - - if firstline.strip(): - tw.line(indent + firstline.strip()) - - if rest: - for line in dedent(rest).split("\n"): - tw.write(indent + line + "\n") - - 
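# Rough usage sketch of the id handling above: when idmaker produces duplicate
# ids, each duplicate receives an integer suffix so node ids stay unique.
import pytest

@pytest.mark.parametrize("word", ["a", "a", "b"])
def test_word(word):
    assert word in ("a", "b")

# Expected node ids (assumed): test_word[a0], test_word[a1], test_word[b]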
-class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): - """ a Function Item is responsible for setting up and executing a - Python test function. - """ - - # disable since functions handle it themselves - _ALLOW_MARKERS = False - - def __init__( - self, - name, - parent, - args=None, - config=None, - callspec=None, - callobj=NOTSET, - keywords=None, - session=None, - fixtureinfo=None, - originalname=None, - ): - super(Function, self).__init__(name, parent, config=config, session=session) - self._args = args - if callobj is not NOTSET: - self.obj = callobj - - self.keywords.update(self.obj.__dict__) - self.own_markers.extend(get_unpacked_marks(self.obj)) - if callspec: - self.callspec = callspec - # this is total hostile and a mess - # keywords are broken by design by now - # this will be redeemed later - for mark in callspec.marks: - # feel free to cry, this was broken for years before - # and keywords cant fix it per design - self.keywords[mark.name] = mark - self.own_markers.extend(normalize_mark_list(callspec.marks)) - if keywords: - self.keywords.update(keywords) - - # todo: this is a hell of a hack - # https://github.com/pytest-dev/pytest/issues/4569 - - self.keywords.update( - dict.fromkeys( - [ - mark.name - for mark in self.iter_markers() - if mark.name not in self.keywords - ], - True, - ) - ) - - if fixtureinfo is None: - fixtureinfo = self.session._fixturemanager.getfixtureinfo( - self, self.obj, self.cls, funcargs=True - ) - self._fixtureinfo = fixtureinfo - self.fixturenames = fixtureinfo.names_closure - self._initrequest() - - #: original function name, without any decorations (for example - #: parametrization adds a ``"[...]"`` suffix to function names). - #: - #: .. versionadded:: 3.0 - self.originalname = originalname - - def _initrequest(self): - self.funcargs = {} - self._request = fixtures.FixtureRequest(self) - - @property - def function(self): - "underlying python 'function' object" - return getimfunc(self.obj) - - def _getobj(self): - name = self.name - i = name.find("[") # parametrization - if i != -1: - name = name[:i] - return getattr(self.parent.obj, name) - - @property - def _pyfuncitem(self): - "(compatonly) for code expecting pytest-2.2 style request objects" - return self - - def runtest(self): - """ execute the underlying test function. 
""" - self.ihook.pytest_pyfunc_call(pyfuncitem=self) - - def setup(self): - super(Function, self).setup() - fixtures.fillfixtures(self) - - -class FunctionDefinition(Function): - """ - internal hack until we get actual definition nodes instead of the - crappy metafunc hack - """ - - def runtest(self): - raise RuntimeError("function definitions are not supposed to be used") - - setup = runtest diff --git a/contrib/python/pytest/py2/_pytest/python_api.py b/contrib/python/pytest/py2/_pytest/python_api.py deleted file mode 100644 index f6e475c3a2..0000000000 --- a/contrib/python/pytest/py2/_pytest/python_api.py +++ /dev/null @@ -1,743 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import math -import pprint -import sys -import warnings -from decimal import Decimal -from numbers import Number - -from more_itertools.more import always_iterable -from six.moves import filterfalse -from six.moves import zip - -import _pytest._code -from _pytest import deprecated -from _pytest.compat import isclass -from _pytest.compat import Iterable -from _pytest.compat import Mapping -from _pytest.compat import Sized -from _pytest.compat import STRING_TYPES -from _pytest.outcomes import fail - -BASE_TYPE = (type, STRING_TYPES) - - -def _cmp_raises_type_error(self, other): - """__cmp__ implementation which raises TypeError. Used - by Approx base classes to implement only == and != and raise a - TypeError for other comparisons. - - Needed in Python 2 only, Python 3 all it takes is not implementing the - other operators at all. - """ - __tracebackhide__ = True - raise TypeError( - "Comparison operators other than == and != not supported by approx objects" - ) - - -def _non_numeric_type_error(value, at): - at_str = " at {}".format(at) if at else "" - return TypeError( - "cannot make approximate comparisons to non-numeric values: {!r} {}".format( - value, at_str - ) - ) - - -# builtin pytest.approx helper - - -class ApproxBase(object): - """ - Provide shared utilities for making approximate comparisons between numbers - or sequences of numbers. - """ - - # Tell numpy to use our `__eq__` operator instead of its. - __array_ufunc__ = None - __array_priority__ = 100 - - def __init__(self, expected, rel=None, abs=None, nan_ok=False): - __tracebackhide__ = True - self.expected = expected - self.abs = abs - self.rel = rel - self.nan_ok = nan_ok - self._check_type() - - def __repr__(self): - raise NotImplementedError - - def __eq__(self, actual): - return all( - a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) - ) - - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - if sys.version_info[0] == 2: - __cmp__ = _cmp_raises_type_error - - def _approx_scalar(self, x): - return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) - - def _yield_comparisons(self, actual): - """ - Yield all the pairs of numbers to be compared. This is used to - implement the `__eq__` method. - """ - raise NotImplementedError - - def _check_type(self): - """ - Raise a TypeError if the expected value is not a valid type. - """ - # This is only a concern if the expected value is a sequence. In every - # other case, the approx() function ensures that the expected value has - # a numeric type. For this reason, the default is to do nothing. The - # classes that deal with sequences should reimplement this method to - # raise if there are any non-numeric elements in the sequence. 
- pass - - -def _recursive_list_map(f, x): - if isinstance(x, list): - return list(_recursive_list_map(f, xi) for xi in x) - else: - return f(x) - - -class ApproxNumpy(ApproxBase): - """ - Perform approximate comparisons where the expected value is numpy array. - """ - - def __repr__(self): - list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) - return "approx({!r})".format(list_scalars) - - if sys.version_info[0] == 2: - __cmp__ = _cmp_raises_type_error - - def __eq__(self, actual): - import numpy as np - - # self.expected is supposed to always be an array here - - if not np.isscalar(actual): - try: - actual = np.asarray(actual) - except: # noqa - raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) - - if not np.isscalar(actual) and actual.shape != self.expected.shape: - return False - - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - import numpy as np - - # `actual` can either be a numpy array or a scalar, it is treated in - # `__eq__` before being passed to `ApproxBase.__eq__`, which is the - # only method that calls this one. - - if np.isscalar(actual): - for i in np.ndindex(self.expected.shape): - yield actual, self.expected[i].item() - else: - for i in np.ndindex(self.expected.shape): - yield actual[i].item(), self.expected[i].item() - - -class ApproxMapping(ApproxBase): - """ - Perform approximate comparisons where the expected value is a mapping with - numeric values (the keys can be anything). - """ - - def __repr__(self): - return "approx({!r})".format( - {k: self._approx_scalar(v) for k, v in self.expected.items()} - ) - - def __eq__(self, actual): - if set(actual.keys()) != set(self.expected.keys()): - return False - - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - for k in self.expected.keys(): - yield actual[k], self.expected[k] - - def _check_type(self): - __tracebackhide__ = True - for key, value in self.expected.items(): - if isinstance(value, type(self.expected)): - msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" - raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) - elif not isinstance(value, Number): - raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) - - -class ApproxSequencelike(ApproxBase): - """ - Perform approximate comparisons where the expected value is a sequence of - numbers. - """ - - def __repr__(self): - seq_type = type(self.expected) - if seq_type not in (tuple, list, set): - seq_type = list - return "approx({!r})".format( - seq_type(self._approx_scalar(x) for x in self.expected) - ) - - def __eq__(self, actual): - if len(actual) != len(self.expected): - return False - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - return zip(actual, self.expected) - - def _check_type(self): - __tracebackhide__ = True - for index, x in enumerate(self.expected): - if isinstance(x, type(self.expected)): - msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" - raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) - elif not isinstance(x, Number): - raise _non_numeric_type_error( - self.expected, at="index {}".format(index) - ) - - -class ApproxScalar(ApproxBase): - """ - Perform approximate comparisons where the expected value is a single number. 
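# Illustrative only: the mapping and sequence variants above require matching
# keys / equal lengths before doing any element-wise tolerance comparison.
import pytest

def test_approx_containers():
    assert {"x": 0.1 + 0.2} == pytest.approx({"x": 0.3})
    assert (0.1 + 0.2, 1.0) == pytest.approx((0.3, 1.0))
    # key sets differ, so the comparison is False regardless of tolerances
    assert not ({"x": 0.3, "y": 1.0} == pytest.approx({"x": 0.3}))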
- """ - - DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 - DEFAULT_RELATIVE_TOLERANCE = 1e-6 - - def __repr__(self): - """ - Return a string communicating both the expected value and the tolerance - for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode - plus/minus symbol if this is python3 (it's too hard to get right for - python2). - """ - if isinstance(self.expected, complex): - return str(self.expected) - - # Infinities aren't compared using tolerances, so don't show a - # tolerance. - if math.isinf(self.expected): - return str(self.expected) - - # If a sensible tolerance can't be calculated, self.tolerance will - # raise a ValueError. In this case, display '???'. - try: - vetted_tolerance = "{:.1e}".format(self.tolerance) - except ValueError: - vetted_tolerance = "???" - - if sys.version_info[0] == 2: - return "{} +- {}".format(self.expected, vetted_tolerance) - else: - return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) - - def __eq__(self, actual): - """ - Return true if the given value is equal to the expected value within - the pre-specified tolerance. - """ - if _is_numpy_array(actual): - # Call ``__eq__()`` manually to prevent infinite-recursion with - # numpy<1.13. See #3748. - return all(self.__eq__(a) for a in actual.flat) - - # Short-circuit exact equality. - if actual == self.expected: - return True - - # Allow the user to control whether NaNs are considered equal to each - # other or not. The abs() calls are for compatibility with complex - # numbers. - if math.isnan(abs(self.expected)): - return self.nan_ok and math.isnan(abs(actual)) - - # Infinity shouldn't be approximately equal to anything but itself, but - # if there's a relative tolerance, it will be infinite and infinity - # will seem approximately equal to everything. The equal-to-itself - # case would have been short circuited above, so here we can just - # return false if the expected value is infinite. The abs() call is - # for compatibility with complex numbers. - if math.isinf(abs(self.expected)): - return False - - # Return true if the two numbers are within the tolerance. - return abs(self.expected - actual) <= self.tolerance - - __hash__ = None - - @property - def tolerance(self): - """ - Return the tolerance for the comparison. This could be either an - absolute tolerance or a relative tolerance, depending on what the user - specified or which would be larger. - """ - - def set_default(x, default): - return x if x is not None else default - - # Figure out what the absolute tolerance should be. ``self.abs`` is - # either None or a value specified by the user. - absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) - - if absolute_tolerance < 0: - raise ValueError( - "absolute tolerance can't be negative: {}".format(absolute_tolerance) - ) - if math.isnan(absolute_tolerance): - raise ValueError("absolute tolerance can't be NaN.") - - # If the user specified an absolute tolerance but not a relative one, - # just return the absolute tolerance. - if self.rel is None: - if self.abs is not None: - return absolute_tolerance - - # Figure out what the relative tolerance should be. ``self.rel`` is - # either None or a value specified by the user. This is done after - # we've made sure the user didn't ask for an absolute tolerance only, - # because we don't want to raise errors about the relative tolerance if - # we aren't even going to use it. 
- relative_tolerance = set_default( - self.rel, self.DEFAULT_RELATIVE_TOLERANCE - ) * abs(self.expected) - - if relative_tolerance < 0: - raise ValueError( - "relative tolerance can't be negative: {}".format(absolute_tolerance) - ) - if math.isnan(relative_tolerance): - raise ValueError("relative tolerance can't be NaN.") - - # Return the larger of the relative and absolute tolerances. - return max(relative_tolerance, absolute_tolerance) - - -class ApproxDecimal(ApproxScalar): - """ - Perform approximate comparisons where the expected value is a decimal. - """ - - DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") - DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") - - -def approx(expected, rel=None, abs=None, nan_ok=False): - """ - Assert that two numbers (or two sets of numbers) are equal to each other - within some tolerance. - - Due to the `intricacies of floating-point arithmetic`__, numbers that we - would intuitively expect to be equal are not always so:: - - >>> 0.1 + 0.2 == 0.3 - False - - __ https://docs.python.org/3/tutorial/floatingpoint.html - - This problem is commonly encountered when writing tests, e.g. when making - sure that floating-point values are what you expect them to be. One way to - deal with this problem is to assert that two floating-point numbers are - equal to within some appropriate tolerance:: - - >>> abs((0.1 + 0.2) - 0.3) < 1e-6 - True - - However, comparisons like this are tedious to write and difficult to - understand. Furthermore, absolute comparisons like the one above are - usually discouraged because there's no tolerance that works well for all - situations. ``1e-6`` is good for numbers around ``1``, but too small for - very big numbers and too big for very small ones. It's better to express - the tolerance as a fraction of the expected value, but relative comparisons - like that are even more difficult to write correctly and concisely. - - The ``approx`` class performs floating-point comparisons using a syntax - that's as intuitive as possible:: - - >>> from pytest import approx - >>> 0.1 + 0.2 == approx(0.3) - True - - The same syntax also works for sequences of numbers:: - - >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) - True - - Dictionary *values*:: - - >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) - True - - ``numpy`` arrays:: - - >>> import numpy as np # doctest: +SKIP - >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP - True - - And for a ``numpy`` array against a scalar:: - - >>> import numpy as np # doctest: +SKIP - >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP - True - - By default, ``approx`` considers numbers within a relative tolerance of - ``1e-6`` (i.e. one part in a million) of its expected value to be equal. - This treatment would lead to surprising results if the expected value was - ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. - To handle this case less surprisingly, ``approx`` also considers numbers - within an absolute tolerance of ``1e-12`` of its expected value to be - equal. Infinity and NaN are special cases. Infinity is only considered - equal to itself, regardless of the relative tolerance. NaN is not - considered equal to anything by default, but you can make it be equal to - itself by setting the ``nan_ok`` argument to True. (This is meant to - facilitate comparing arrays that use NaN to mean "no data".) 
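# Quick numeric sketch of the defaults just described: rel=1e-6 of the expected
# value or abs=1e-12, whichever is larger, and NaN only matches with nan_ok.
import pytest

def test_default_tolerances():
    assert 1.0 + 1e-9 == pytest.approx(1.0)   # within the 1e-6 relative tolerance
    assert 1e-13 == pytest.approx(0.0)        # within the 1e-12 absolute tolerance
    assert float("nan") == pytest.approx(float("nan"), nan_ok=True)
    assert not (float("nan") == pytest.approx(float("nan")))  # NaN != NaN by default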
- - Both the relative and absolute tolerances can be changed by passing - arguments to the ``approx`` constructor:: - - >>> 1.0001 == approx(1) - False - >>> 1.0001 == approx(1, rel=1e-3) - True - >>> 1.0001 == approx(1, abs=1e-3) - True - - If you specify ``abs`` but not ``rel``, the comparison will not consider - the relative tolerance at all. In other words, two numbers that are within - the default relative tolerance of ``1e-6`` will still be considered unequal - if they exceed the specified absolute tolerance. If you specify both - ``abs`` and ``rel``, the numbers will be considered equal if either - tolerance is met:: - - >>> 1 + 1e-8 == approx(1) - True - >>> 1 + 1e-8 == approx(1, abs=1e-12) - False - >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) - True - - If you're thinking about using ``approx``, then you might want to know how - it compares to other good ways of comparing floating-point numbers. All of - these algorithms are based on relative and absolute tolerances and should - agree for the most part, but they do have meaningful differences: - - - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative - tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute - tolerance is met. Because the relative tolerance is calculated w.r.t. - both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor - ``b`` is a "reference value"). You have to specify an absolute tolerance - if you want to compare to ``0.0`` because there is no tolerance by - default. Only available in python>=3.5. `More information...`__ - - __ https://docs.python.org/3/library/math.html#math.isclose - - - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference - between ``a`` and ``b`` is less that the sum of the relative tolerance - w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance - is only calculated w.r.t. ``b``, this test is asymmetric and you can - think of ``b`` as the reference value. Support for comparing sequences - is provided by ``numpy.allclose``. `More information...`__ - - __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html - - - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` - are within an absolute tolerance of ``1e-7``. No relative tolerance is - considered and the absolute tolerance cannot be changed, so this function - is not appropriate for very large or very small numbers. Also, it's only - available in subclasses of ``unittest.TestCase`` and it's ugly because it - doesn't follow PEP8. `More information...`__ - - __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual - - - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative - tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. - Because the relative tolerance is only calculated w.r.t. ``b``, this test - is asymmetric and you can think of ``b`` as the reference value. In the - special case that you explicitly specify an absolute tolerance but not a - relative tolerance, only the absolute tolerance is considered. - - .. warning:: - - .. versionchanged:: 3.2 - - In order to avoid inconsistent behavior, ``TypeError`` is - raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. - The example below illustrates the problem:: - - assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) - assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) - - In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` - to be called. 
But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to - comparison. This is because the call hierarchy of rich comparisons - follows a fixed behavior. `More information...`__ - - __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ - """ - - # Delegate the comparison to a class that knows how to deal with the type - # of the expected value (e.g. int, float, list, dict, numpy.array, etc). - # - # The primary responsibility of these classes is to implement ``__eq__()`` - # and ``__repr__()``. The former is used to actually check if some - # "actual" value is equivalent to the given expected value within the - # allowed tolerance. The latter is used to show the user the expected - # value and tolerance, in the case that a test failed. - # - # The actual logic for making approximate comparisons can be found in - # ApproxScalar, which is used to compare individual numbers. All of the - # other Approx classes eventually delegate to this class. The ApproxBase - # class provides some convenient methods and overloads, but isn't really - # essential. - - __tracebackhide__ = True - - if isinstance(expected, Decimal): - cls = ApproxDecimal - elif isinstance(expected, Number): - cls = ApproxScalar - elif isinstance(expected, Mapping): - cls = ApproxMapping - elif _is_numpy_array(expected): - cls = ApproxNumpy - elif ( - isinstance(expected, Iterable) - and isinstance(expected, Sized) - and not isinstance(expected, STRING_TYPES) - ): - cls = ApproxSequencelike - else: - raise _non_numeric_type_error(expected, at=None) - - return cls(expected, rel, abs, nan_ok) - - -def _is_numpy_array(obj): - """ - Return true if the given object is a numpy array. Make a special effort to - avoid importing numpy unless it's really necessary. - """ - import sys - - np = sys.modules.get("numpy") - if np is not None: - return isinstance(obj, np.ndarray) - return False - - -# builtin pytest.raises helper - - -def raises(expected_exception, *args, **kwargs): - r""" - Assert that a code block/function call raises ``expected_exception`` - or raise a failure exception otherwise. - - :kwparam match: if specified, a string containing a regular expression, - or a regular expression object, that is tested against the string - representation of the exception using ``re.search``. To match a literal - string that may contain `special characters`__, the pattern can - first be escaped with ``re.escape``. - - __ https://docs.python.org/3/library/re.html#regular-expression-syntax - - :kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message - if the exception is not raised. See :ref:`the deprecation docs <raises message deprecated>` for a workaround. - - .. currentmodule:: _pytest._code - - Use ``pytest.raises`` as a context manager, which will capture the exception of the given - type:: - - >>> with raises(ZeroDivisionError): - ... 1/0 - - If the code block does not raise the expected exception (``ZeroDivisionError`` in the example - above), or no exception at all, the check will fail instead. - - You can also use the keyword argument ``match`` to assert that the - exception matches a text or regex:: - - >>> with raises(ValueError, match='must be 0 or None'): - ... raise ValueError("value must be 0 or None") - - >>> with raises(ValueError, match=r'must be \d+$'): - ... 
raise ValueError("value must be 42") - - The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the - details of the captured exception:: - - >>> with raises(ValueError) as exc_info: - ... raise ValueError("value must be 42") - >>> assert exc_info.type is ValueError - >>> assert exc_info.value.args[0] == "value must be 42" - - .. deprecated:: 4.1 - - In the context manager form you may use the keyword argument - ``message`` to specify a custom failure message that will be displayed - in case the ``pytest.raises`` check fails. This has been deprecated as it - is considered error prone as users often mean to use ``match`` instead. - See :ref:`the deprecation docs <raises message deprecated>` for a workaround. - - .. note:: - - When using ``pytest.raises`` as a context manager, it's worthwhile to - note that normal context manager rules apply and that the exception - raised *must* be the final line in the scope of the context manager. - Lines of code after that, within the scope of the context manager will - not be executed. For example:: - - >>> value = 15 - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... assert exc_info.type is ValueError # this will not execute - - Instead, the following approach must be taken (note the difference in - scope):: - - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... - >>> assert exc_info.type is ValueError - - **Using with** ``pytest.mark.parametrize`` - - When using :ref:`pytest.mark.parametrize ref` - it is possible to parametrize tests such that - some runs raise an exception and others do not. - - See :ref:`parametrizing_conditional_raising` for an example. - - **Legacy form** - - It is possible to specify a callable by passing a to-be-called lambda:: - - >>> raises(ZeroDivisionError, lambda: 1/0) - <ExceptionInfo ...> - - or you can specify an arbitrary callable with arguments:: - - >>> def f(x): return 1/x - ... - >>> raises(ZeroDivisionError, f, 0) - <ExceptionInfo ...> - >>> raises(ZeroDivisionError, f, x=0) - <ExceptionInfo ...> - - The form above is fully supported but discouraged for new code because the - context manager form is regarded as more readable and less error-prone. - - .. note:: - Similar to caught exception objects in Python, explicitly clearing - local references to returned ``ExceptionInfo`` objects can - help the Python interpreter speed up its garbage collection. - - Clearing those references breaks a reference cycle - (``ExceptionInfo`` --> caught exception --> frame stack raising - the exception --> current frame stack --> local variables --> - ``ExceptionInfo``) which makes Python keep all objects referenced - from that cycle (including all local variables in the current - frame) alive until the next cyclic garbage collection run. See the - official Python ``try`` statement documentation for more detailed - information. 
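# Minimal sketch of the note above: dropping the local ExceptionInfo reference
# breaks the cycle (ExceptionInfo -> traceback -> frames -> locals) so the
# interpreter can reclaim those frames without waiting for cyclic GC.
import pytest

def test_reference_hygiene():
    with pytest.raises(ValueError) as exc_info:
        raise ValueError("boom")
    assert exc_info.type is ValueError
    del exc_info  # explicitly clear the local reference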
- - """ - __tracebackhide__ = True - for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): - msg = ( - "exceptions must be old-style classes or" - " derived from BaseException, not %s" - ) - raise TypeError(msg % type(exc)) - - message = "DID NOT RAISE {}".format(expected_exception) - match_expr = None - - if not args: - if "message" in kwargs: - message = kwargs.pop("message") - warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2) - if "match" in kwargs: - match_expr = kwargs.pop("match") - if kwargs: - msg = "Unexpected keyword arguments passed to pytest.raises: " - msg += ", ".join(sorted(kwargs)) - raise TypeError(msg) - return RaisesContext(expected_exception, message, match_expr) - elif isinstance(args[0], str): - warnings.warn(deprecated.RAISES_EXEC, stacklevel=2) - (code,) = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - # print "raises frame scope: %r" % frame.f_locals - try: - code = _pytest._code.Source(code).compile(_genframe=frame) - exec(code, frame.f_globals, loc) - # XXX didn't mean f_globals == f_locals something special? - # this is destroyed here ... - except expected_exception: - return _pytest._code.ExceptionInfo.from_current() - else: - func = args[0] - try: - func(*args[1:], **kwargs) - except expected_exception: - return _pytest._code.ExceptionInfo.from_current() - fail(message) - - -raises.Exception = fail.Exception - - -class RaisesContext(object): - def __init__(self, expected_exception, message, match_expr): - self.expected_exception = expected_exception - self.message = message - self.match_expr = match_expr - self.excinfo = None - - def __enter__(self): - self.excinfo = _pytest._code.ExceptionInfo.for_later() - return self.excinfo - - def __exit__(self, *tp): - __tracebackhide__ = True - if tp[0] is None: - fail(self.message) - self.excinfo.__init__(tp) - suppress_exception = issubclass(self.excinfo.type, self.expected_exception) - if sys.version_info[0] == 2 and suppress_exception: - sys.exc_clear() - if self.match_expr is not None and suppress_exception: - self.excinfo.match(self.match_expr) - return suppress_exception diff --git a/contrib/python/pytest/py2/_pytest/recwarn.py b/contrib/python/pytest/py2/_pytest/recwarn.py deleted file mode 100644 index 7abf2e9355..0000000000 --- a/contrib/python/pytest/py2/_pytest/recwarn.py +++ /dev/null @@ -1,251 +0,0 @@ -# -*- coding: utf-8 -*- -""" recording warnings during test function execution. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import inspect -import re -import sys -import warnings - -import six - -import _pytest._code -from _pytest.deprecated import PYTEST_WARNS_UNKNOWN_KWARGS -from _pytest.deprecated import WARNS_EXEC -from _pytest.fixtures import yield_fixture -from _pytest.outcomes import fail - - -@yield_fixture -def recwarn(): - """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - - See http://docs.python.org/library/warnings.html for information - on warning categories. - """ - wrec = WarningsRecorder() - with wrec: - warnings.simplefilter("default") - yield wrec - - -def deprecated_call(func=None, *args, **kwargs): - """context manager that can be used to ensure a block of code triggers a - ``DeprecationWarning`` or ``PendingDeprecationWarning``:: - - >>> import warnings - >>> def api_call_v2(): - ... warnings.warn('use v3 of this api', DeprecationWarning) - ... 
return 200 - - >>> with deprecated_call(): - ... assert api_call_v2() == 200 - - ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, - in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings - types above. - """ - __tracebackhide__ = True - if func is not None: - args = (func,) + args - return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) - - -def warns(expected_warning, *args, **kwargs): - r"""Assert that code raises a particular class of warning. - - Specifically, the parameter ``expected_warning`` can be a warning class or - sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or - classes. - - This helper produces a list of :class:`warnings.WarningMessage` objects, - one for each warning raised. - - This function can be used as a context manager, or any of the other ways - ``pytest.raises`` can be used:: - - >>> with warns(RuntimeWarning): - ... warnings.warn("my warning", RuntimeWarning) - - In the context manager form you may use the keyword argument ``match`` to assert - that the exception matches a text or regex:: - - >>> with warns(UserWarning, match='must be 0 or None'): - ... warnings.warn("value must be 0 or None", UserWarning) - - >>> with warns(UserWarning, match=r'must be \d+$'): - ... warnings.warn("value must be 42", UserWarning) - - >>> with warns(UserWarning, match=r'must be \d+$'): - ... warnings.warn("this is not here", UserWarning) - Traceback (most recent call last): - ... - Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... - - """ - __tracebackhide__ = True - if not args: - match_expr = kwargs.pop("match", None) - if kwargs: - warnings.warn( - PYTEST_WARNS_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=2 - ) - return WarningsChecker(expected_warning, match_expr=match_expr) - elif isinstance(args[0], str): - warnings.warn(WARNS_EXEC, stacklevel=2) - (code,) = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - - with WarningsChecker(expected_warning): - code = _pytest._code.Source(code).compile() - exec(code, frame.f_globals, loc) - else: - func = args[0] - with WarningsChecker(expected_warning): - return func(*args[1:], **kwargs) - - -class WarningsRecorder(warnings.catch_warnings): - """A context manager to record raised warnings. - - Adapted from `warnings.catch_warnings`. 
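# Hedged usage sketch: the ``recwarn`` fixture shown earlier yields a
# WarningsRecorder whose entries can be indexed, popped and cleared from
# within a test.
import warnings

def test_recwarn_fixture(recwarn):
    warnings.warn("old api", DeprecationWarning)
    assert len(recwarn) == 1
    w = recwarn.pop(DeprecationWarning)
    assert "old api" in str(w.message)
    recwarn.clear()
    assert len(recwarn) == 0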
- """ - - def __init__(self): - super(WarningsRecorder, self).__init__(record=True) - self._entered = False - self._list = [] - - @property - def list(self): - """The list of recorded warnings.""" - return self._list - - def __getitem__(self, i): - """Get a recorded warning by index.""" - return self._list[i] - - def __iter__(self): - """Iterate through the recorded warnings.""" - return iter(self._list) - - def __len__(self): - """The number of recorded warnings.""" - return len(self._list) - - def pop(self, cls=Warning): - """Pop the first recorded warning, raise exception if not exists.""" - for i, w in enumerate(self._list): - if issubclass(w.category, cls): - return self._list.pop(i) - __tracebackhide__ = True - raise AssertionError("%r not found in warning list" % cls) - - def clear(self): - """Clear the list of recorded warnings.""" - self._list[:] = [] - - def __enter__(self): - if self._entered: - __tracebackhide__ = True - raise RuntimeError("Cannot enter %r twice" % self) - self._list = super(WarningsRecorder, self).__enter__() - warnings.simplefilter("always") - # python3 keeps track of a "filter version", when the filters are - # updated previously seen warnings can be re-warned. python2 has no - # concept of this so we must reset the warnings registry manually. - # trivial patching of `warnings.warn` seems to be enough somehow? - if six.PY2: - - def warn(message, category=None, stacklevel=1): - # duplicate the stdlib logic due to - # bad handing in the c version of warnings - if isinstance(message, Warning): - category = message.__class__ - # Check category argument - if category is None: - category = UserWarning - assert issubclass(category, Warning) - - # emulate resetting the warn registry - f_globals = sys._getframe(stacklevel).f_globals - if "__warningregistry__" in f_globals: - orig = f_globals["__warningregistry__"] - f_globals["__warningregistry__"] = None - try: - return self._saved_warn(message, category, stacklevel + 1) - finally: - f_globals["__warningregistry__"] = orig - else: - return self._saved_warn(message, category, stacklevel + 1) - - warnings.warn, self._saved_warn = warn, warnings.warn - return self - - def __exit__(self, *exc_info): - if not self._entered: - __tracebackhide__ = True - raise RuntimeError("Cannot exit %r without entering first" % self) - # see above where `self._saved_warn` is assigned - if six.PY2: - warnings.warn = self._saved_warn - super(WarningsRecorder, self).__exit__(*exc_info) - - # Built-in catch_warnings does not reset entered state so we do it - # manually here for this context manager to become reusable. 
- self._entered = False - - -class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None, match_expr=None): - super(WarningsChecker, self).__init__() - - msg = "exceptions must be old-style classes or derived from Warning, not %s" - if isinstance(expected_warning, tuple): - for exc in expected_warning: - if not inspect.isclass(exc): - raise TypeError(msg % type(exc)) - elif inspect.isclass(expected_warning): - expected_warning = (expected_warning,) - elif expected_warning is not None: - raise TypeError(msg % type(expected_warning)) - - self.expected_warning = expected_warning - self.match_expr = match_expr - - def __exit__(self, *exc_info): - super(WarningsChecker, self).__exit__(*exc_info) - - __tracebackhide__ = True - - # only check if we're not currently handling an exception - if all(a is None for a in exc_info): - if self.expected_warning is not None: - if not any(issubclass(r.category, self.expected_warning) for r in self): - __tracebackhide__ = True - fail( - "DID NOT WARN. No warnings of type {} was emitted. " - "The list of emitted warnings is: {}.".format( - self.expected_warning, [each.message for each in self] - ) - ) - elif self.match_expr is not None: - for r in self: - if issubclass(r.category, self.expected_warning): - if re.compile(self.match_expr).search(str(r.message)): - break - else: - fail( - "DID NOT WARN. No warnings of type {} matching" - " ('{}') was emitted. The list of emitted warnings" - " is: {}.".format( - self.expected_warning, - self.match_expr, - [each.message for each in self], - ) - ) diff --git a/contrib/python/pytest/py2/_pytest/reports.py b/contrib/python/pytest/py2/_pytest/reports.py deleted file mode 100644 index 0bba6762c3..0000000000 --- a/contrib/python/pytest/py2/_pytest/reports.py +++ /dev/null @@ -1,435 +0,0 @@ -# -*- coding: utf-8 -*- -from pprint import pprint - -import py -import six - -from _pytest._code.code import ExceptionInfo -from _pytest._code.code import ReprEntry -from _pytest._code.code import ReprEntryNative -from _pytest._code.code import ReprExceptionInfo -from _pytest._code.code import ReprFileLocation -from _pytest._code.code import ReprFuncArgs -from _pytest._code.code import ReprLocals -from _pytest._code.code import ReprTraceback -from _pytest._code.code import TerminalRepr -from _pytest.outcomes import skip -from _pytest.pathlib import Path - - -def getslaveinfoline(node): - try: - return node._slaveinfocache - except AttributeError: - d = node.slaveinfo - ver = "%s.%s.%s" % d["version_info"][:3] - node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( - d["id"], - d["sysplatform"], - ver, - d["executable"], - ) - return s - - -class BaseReport(object): - when = None - location = None - - def __init__(self, **kw): - self.__dict__.update(kw) - - def toterminal(self, out): - if hasattr(self, "node"): - out.line(getslaveinfoline(self.node)) - - longrepr = self.longrepr - if longrepr is None: - return - - if hasattr(longrepr, "toterminal"): - longrepr.toterminal(out) - else: - try: - out.line(longrepr) - except UnicodeEncodeError: - out.line("<unprintable longrepr>") - - def get_sections(self, prefix): - for name, content in self.sections: - if name.startswith(prefix): - yield prefix, content - - @property - def longreprtext(self): - """ - Read-only property that returns the full string representation - of ``longrepr``. - - .. 
versionadded:: 3.0 - """ - tw = py.io.TerminalWriter(stringio=True) - tw.hasmarkup = False - self.toterminal(tw) - exc = tw.stringio.getvalue() - return exc.strip() - - @property - def caplog(self): - """Return captured log lines, if log capturing is enabled - - .. versionadded:: 3.5 - """ - return "\n".join( - content for (prefix, content) in self.get_sections("Captured log") - ) - - @property - def capstdout(self): - """Return captured text from stdout, if capturing is enabled - - .. versionadded:: 3.0 - """ - return "".join( - content for (prefix, content) in self.get_sections("Captured stdout") - ) - - @property - def capstderr(self): - """Return captured text from stderr, if capturing is enabled - - .. versionadded:: 3.0 - """ - return "".join( - content for (prefix, content) in self.get_sections("Captured stderr") - ) - - passed = property(lambda x: x.outcome == "passed") - failed = property(lambda x: x.outcome == "failed") - skipped = property(lambda x: x.outcome == "skipped") - - @property - def fspath(self): - return self.nodeid.split("::")[0] - - @property - def count_towards_summary(self): - """ - **Experimental** - - Returns True if this report should be counted towards the totals shown at the end of the - test session: "1 passed, 1 failure, etc". - - .. note:: - - This function is considered **experimental**, so beware that it is subject to changes - even in patch releases. - """ - return True - - @property - def head_line(self): - """ - **Experimental** - - Returns the head line shown with longrepr output for this report, more commonly during - traceback representation during failures:: - - ________ Test.foo ________ - - - In the example above, the head_line is "Test.foo". - - .. note:: - - This function is considered **experimental**, so beware that it is subject to changes - even in patch releases. - """ - if self.location is not None: - fspath, lineno, domain = self.location - return domain - - def _get_verbose_word(self, config): - _category, _short, verbose = config.hook.pytest_report_teststatus( - report=self, config=config - ) - return verbose - - def _to_json(self): - """ - This was originally the serialize_report() function from xdist (ca03269). - - Returns the contents of this report as a dict of builtin entries, suitable for - serialization. - - Experimental method. - """ - - def disassembled_report(rep): - reprtraceback = rep.longrepr.reprtraceback.__dict__.copy() - reprcrash = rep.longrepr.reprcrash.__dict__.copy() - - new_entries = [] - for entry in reprtraceback["reprentries"]: - entry_data = { - "type": type(entry).__name__, - "data": entry.__dict__.copy(), - } - for key, value in entry_data["data"].items(): - if hasattr(value, "__dict__"): - entry_data["data"][key] = value.__dict__.copy() - new_entries.append(entry_data) - - reprtraceback["reprentries"] = new_entries - - return { - "reprcrash": reprcrash, - "reprtraceback": reprtraceback, - "sections": rep.longrepr.sections, - } - - d = self.__dict__.copy() - if hasattr(self.longrepr, "toterminal"): - if hasattr(self.longrepr, "reprtraceback") and hasattr( - self.longrepr, "reprcrash" - ): - d["longrepr"] = disassembled_report(self) - else: - d["longrepr"] = six.text_type(self.longrepr) - else: - d["longrepr"] = self.longrepr - for name in d: - if isinstance(d[name], (py.path.local, Path)): - d[name] = str(d[name]) - elif name == "result": - d[name] = None # for now - return d - - @classmethod - def _from_json(cls, reportdict): - """ - This was originally the serialize_report() function from xdist (ca03269). 
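# Hedged sketch of the round trip provided by _to_json/_from_json (this is
# essentially what pytest-xdist does to ship reports between processes):
# dump the report to builtin types, then rebuild it on the receiving side.
from _pytest.reports import TestReport

def roundtrip(report):
    data = report._to_json()        # only builtin types, safe to serialize
    return TestReport._from_json(data)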
- - Factory method that returns either a TestReport or CollectReport, depending on the calling - class. It's the callers responsibility to know which class to pass here. - - Experimental method. - """ - if reportdict["longrepr"]: - if ( - "reprcrash" in reportdict["longrepr"] - and "reprtraceback" in reportdict["longrepr"] - ): - - reprtraceback = reportdict["longrepr"]["reprtraceback"] - reprcrash = reportdict["longrepr"]["reprcrash"] - - unserialized_entries = [] - reprentry = None - for entry_data in reprtraceback["reprentries"]: - data = entry_data["data"] - entry_type = entry_data["type"] - if entry_type == "ReprEntry": - reprfuncargs = None - reprfileloc = None - reprlocals = None - if data["reprfuncargs"]: - reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) - if data["reprfileloc"]: - reprfileloc = ReprFileLocation(**data["reprfileloc"]) - if data["reprlocals"]: - reprlocals = ReprLocals(data["reprlocals"]["lines"]) - - reprentry = ReprEntry( - lines=data["lines"], - reprfuncargs=reprfuncargs, - reprlocals=reprlocals, - filelocrepr=reprfileloc, - style=data["style"], - ) - elif entry_type == "ReprEntryNative": - reprentry = ReprEntryNative(data["lines"]) - else: - _report_unserialization_failure(entry_type, cls, reportdict) - unserialized_entries.append(reprentry) - reprtraceback["reprentries"] = unserialized_entries - - exception_info = ReprExceptionInfo( - reprtraceback=ReprTraceback(**reprtraceback), - reprcrash=ReprFileLocation(**reprcrash), - ) - - for section in reportdict["longrepr"]["sections"]: - exception_info.addsection(*section) - reportdict["longrepr"] = exception_info - - return cls(**reportdict) - - -def _report_unserialization_failure(type_name, report_class, reportdict): - url = "https://github.com/pytest-dev/pytest/issues" - stream = py.io.TextIO() - pprint("-" * 100, stream=stream) - pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream) - pprint("report_name: %s" % report_class, stream=stream) - pprint(reportdict, stream=stream) - pprint("Please report this bug at %s" % url, stream=stream) - pprint("-" * 100, stream=stream) - raise RuntimeError(stream.getvalue()) - - -class TestReport(BaseReport): - """ Basic test report object (also used for setup and teardown calls if - they fail). - """ - - __test__ = False - - def __init__( - self, - nodeid, - location, - keywords, - outcome, - longrepr, - when, - sections=(), - duration=0, - user_properties=None, - **extra - ): - #: normalized collection node id - self.nodeid = nodeid - - #: a (filesystempath, lineno, domaininfo) tuple indicating the - #: actual location of a test item - it might be different from the - #: collected one e.g. if a method is inherited from a different module. - self.location = location - - #: a name -> value dictionary containing all keywords and - #: markers associated with a test invocation. - self.keywords = keywords - - #: test outcome, always one of "passed", "failed", "skipped". - self.outcome = outcome - - #: None or a failure representation. - self.longrepr = longrepr - - #: one of 'setup', 'call', 'teardown' to indicate runtest phase. - self.when = when - - #: user properties is a list of tuples (name, value) that holds user - #: defined properties of the test - self.user_properties = list(user_properties or []) - - #: list of pairs ``(str, str)`` of extra information which needs to - #: marshallable. Used by pytest to add captured text - #: from ``stdout`` and ``stderr``, but may be used by other plugins - #: to add arbitrary information to reports. 
- self.sections = list(sections) - - #: time it took to run just the test - self.duration = duration - - self.__dict__.update(extra) - - def __repr__(self): - return "<%s %r when=%r outcome=%r>" % ( - self.__class__.__name__, - self.nodeid, - self.when, - self.outcome, - ) - - @classmethod - def from_item_and_call(cls, item, call): - """ - Factory method to create and fill a TestReport with standard item and call info. - """ - when = call.when - duration = call.stop - call.start - keywords = {x: 1 for x in item.keywords} - excinfo = call.excinfo - sections = [] - if not call.excinfo: - outcome = "passed" - longrepr = None - else: - if not isinstance(excinfo, ExceptionInfo): - outcome = "failed" - longrepr = excinfo - elif excinfo.errisinstance(skip.Exception): - outcome = "skipped" - r = excinfo._getreprcrash() - longrepr = (str(r.path), r.lineno, r.message) - else: - outcome = "failed" - if call.when == "call": - longrepr = item.repr_failure(excinfo) - else: # exception in setup or teardown - longrepr = item._repr_failure_py( - excinfo, style=item.config.getoption("tbstyle", "auto") - ) - for rwhen, key, content in item._report_sections: - sections.append(("Captured %s %s" % (key, rwhen), content)) - return cls( - item.nodeid, - item.location, - keywords, - outcome, - longrepr, - when, - sections, - duration, - user_properties=item.user_properties, - ) - - -class CollectReport(BaseReport): - when = "collect" - - def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): - self.nodeid = nodeid - self.outcome = outcome - self.longrepr = longrepr - self.result = result or [] - self.sections = list(sections) - self.__dict__.update(extra) - - @property - def location(self): - return (self.fspath, None, self.fspath) - - def __repr__(self): - return "<CollectReport %r lenresult=%s outcome=%r>" % ( - self.nodeid, - len(self.result), - self.outcome, - ) - - -class CollectErrorRepr(TerminalRepr): - def __init__(self, msg): - self.longrepr = msg - - def toterminal(self, out): - out.line(self.longrepr, red=True) - - -def pytest_report_to_serializable(report): - if isinstance(report, (TestReport, CollectReport)): - data = report._to_json() - data["_report_type"] = report.__class__.__name__ - return data - - -def pytest_report_from_serializable(data): - if "_report_type" in data: - if data["_report_type"] == "TestReport": - return TestReport._from_json(data) - elif data["_report_type"] == "CollectReport": - return CollectReport._from_json(data) - assert False, "Unknown report_type unserialize data: {}".format( - data["_report_type"] - ) diff --git a/contrib/python/pytest/py2/_pytest/resultlog.py b/contrib/python/pytest/py2/_pytest/resultlog.py deleted file mode 100644 index bd30b5071e..0000000000 --- a/contrib/python/pytest/py2/_pytest/resultlog.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -""" log machine-parseable test session result information in a plain -text file. 
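# Assumed shape of the entries written by the ResultLog plugin defined below:
# "<lettercode> <testpath>", followed by the longrepr indented by one space.
import io
from _pytest.resultlog import ResultLog

buf = io.StringIO()
log = ResultLog(config=None, logfile=buf)
log.write_log_entry("test_mod.py::test_fails", "F", "assert 0")
print(buf.getvalue(), end="")
# F test_mod.py::test_fails
#  assert 0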
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import py - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "resultlog plugin options") - group.addoption( - "--resultlog", - "--result-log", - action="store", - metavar="path", - default=None, - help="DEPRECATED path for machine-readable result log.", - ) - - -def pytest_configure(config): - resultlog = config.option.resultlog - # prevent opening resultlog on slave nodes (xdist) - if resultlog and not hasattr(config, "slaveinput"): - dirname = os.path.dirname(os.path.abspath(resultlog)) - if not os.path.isdir(dirname): - os.makedirs(dirname) - logfile = open(resultlog, "w", 1) # line buffered - config._resultlog = ResultLog(config, logfile) - config.pluginmanager.register(config._resultlog) - - from _pytest.deprecated import RESULT_LOG - from _pytest.warnings import _issue_warning_captured - - _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2) - - -def pytest_unconfigure(config): - resultlog = getattr(config, "_resultlog", None) - if resultlog: - resultlog.logfile.close() - del config._resultlog - config.pluginmanager.unregister(resultlog) - - -class ResultLog(object): - def __init__(self, config, logfile): - self.config = config - self.logfile = logfile # preferably line buffered - - def write_log_entry(self, testpath, lettercode, longrepr): - print("%s %s" % (lettercode, testpath), file=self.logfile) - for line in longrepr.splitlines(): - print(" %s" % line, file=self.logfile) - - def log_outcome(self, report, lettercode, longrepr): - testpath = getattr(report, "nodeid", None) - if testpath is None: - testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) - - def pytest_runtest_logreport(self, report): - if report.when != "call" and report.passed: - return - res = self.config.hook.pytest_report_teststatus( - report=report, config=self.config - ) - code = res[1] - if code == "x": - longrepr = str(report.longrepr) - elif code == "X": - longrepr = "" - elif report.passed: - longrepr = "" - elif report.failed: - longrepr = str(report.longrepr) - elif report.skipped: - longrepr = str(report.longrepr[2]) - self.log_outcome(report, code, longrepr) - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - code = "F" - longrepr = str(report.longrepr) - else: - assert report.skipped - code = "S" - longrepr = "%s:%d: %s" % report.longrepr - self.log_outcome(report, code, longrepr) - - def pytest_internalerror(self, excrepr): - reprcrash = getattr(excrepr, "reprcrash", None) - path = getattr(reprcrash, "path", None) - if path is None: - path = "cwd:%s" % py.path.local() - self.write_log_entry(path, "!", str(excrepr)) diff --git a/contrib/python/pytest/py2/_pytest/runner.py b/contrib/python/pytest/py2/_pytest/runner.py deleted file mode 100644 index 34ae917738..0000000000 --- a/contrib/python/pytest/py2/_pytest/runner.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -""" basic collect and runtest protocol implementations """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import bdb -import os -import sys -from time import time - -import attr -import six - -from .reports import CollectErrorRepr -from .reports import CollectReport -from .reports import TestReport -from _pytest._code.code import ExceptionInfo -from _pytest.compat import safe_str -from _pytest.outcomes import Exit -from 
_pytest.outcomes import Skipped -from _pytest.outcomes import TEST_OUTCOME - -# -# pytest plugin hooks - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "reporting", after="general") - group.addoption( - "--durations", - action="store", - type=int, - default=None, - metavar="N", - help="show N slowest setup/test durations (N=0 for all).", - ), - - -def pytest_terminal_summary(terminalreporter): - durations = terminalreporter.config.option.durations - verbose = terminalreporter.config.getvalue("verbose") - if durations is None: - return - tr = terminalreporter - dlist = [] - for replist in tr.stats.values(): - for rep in replist: - if hasattr(rep, "duration"): - dlist.append(rep) - if not dlist: - return - dlist.sort(key=lambda x: x.duration) - dlist.reverse() - if not durations: - tr.write_sep("=", "slowest test durations") - else: - tr.write_sep("=", "slowest %s test durations" % durations) - dlist = dlist[:durations] - - for rep in dlist: - if verbose < 2 and rep.duration < 0.005: - tr.write_line("") - tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)") - break - tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, rep.nodeid)) - - -def pytest_sessionstart(session): - session._setupstate = SetupState() - - -def pytest_sessionfinish(session): - session._setupstate.teardown_all() - - -def pytest_runtest_protocol(item, nextitem): - item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) - runtestprotocol(item, nextitem=nextitem) - item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) - return True - - -def runtestprotocol(item, log=True, nextitem=None): - hasrequest = hasattr(item, "_request") - if hasrequest and not item._request: - item._initrequest() - rep = call_and_report(item, "setup", log) - reports = [rep] - if rep.passed: - if item.config.getoption("setupshow", False): - show_test_item(item) - if not item.config.getoption("setuponly", False): - reports.append(call_and_report(item, "call", log)) - reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) - # after all teardown hooks have been called - # want funcargs and request info to go away - if hasrequest: - item._request = False - item.funcargs = None - return reports - - -def show_test_item(item): - """Show test function, parameters and the fixtures of the test item.""" - tw = item.config.get_terminal_writer() - tw.line() - tw.write(" " * 8) - tw.write(item._nodeid) - used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) - if used_fixtures: - tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) - - -def pytest_runtest_setup(item): - _update_current_test_var(item, "setup") - item.session._setupstate.prepare(item) - - -def pytest_runtest_call(item): - _update_current_test_var(item, "call") - sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) - try: - item.runtest() - except Exception: - # Store trace info to allow postmortem debugging - type, value, tb = sys.exc_info() - tb = tb.tb_next # Skip *this* frame - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - del type, value, tb # Get rid of these in this frame - raise - - -def pytest_runtest_teardown(item, nextitem): - _update_current_test_var(item, "teardown") - item.session._setupstate.teardown_exact(item, nextitem) - _update_current_test_var(item, None) - - -def _update_current_test_var(item, when): - """ - Update PYTEST_CURRENT_TEST to reflect the current item and stage. 
- - If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. - """ - var_name = "PYTEST_CURRENT_TEST" - if when: - value = "{} ({})".format(item.nodeid, when) - # don't allow null bytes on environment variables (see #2644, #2957) - value = value.replace("\x00", "(null)") - os.environ[var_name] = value - else: - os.environ.pop(var_name) - - -def pytest_report_teststatus(report): - if report.when in ("setup", "teardown"): - if report.failed: - # category, shortletter, verbose-word - return "error", "E", "ERROR" - elif report.skipped: - return "skipped", "s", "SKIPPED" - else: - return "", "", "" - - -# -# Implementation - - -def call_and_report(item, when, log=True, **kwds): - call = call_runtest_hook(item, when, **kwds) - hook = item.ihook - report = hook.pytest_runtest_makereport(item=item, call=call) - if log: - hook.pytest_runtest_logreport(report=report) - if check_interactive_exception(call, report): - hook.pytest_exception_interact(node=item, call=call, report=report) - return report - - -def check_interactive_exception(call, report): - return call.excinfo and not ( - hasattr(report, "wasxfail") - or call.excinfo.errisinstance(Skipped) - or call.excinfo.errisinstance(bdb.BdbQuit) - ) - - -def call_runtest_hook(item, when, **kwds): - hookname = "pytest_runtest_" + when - ihook = getattr(item.ihook, hookname) - reraise = (Exit,) - if not item.config.getoption("usepdb", False): - reraise += (KeyboardInterrupt,) - return CallInfo.from_call( - lambda: ihook(item=item, **kwds), when=when, reraise=reraise - ) - - -@attr.s(repr=False) -class CallInfo(object): - """ Result/Exception info a function invocation. """ - - _result = attr.ib() - # Optional[ExceptionInfo] - excinfo = attr.ib() - start = attr.ib() - stop = attr.ib() - when = attr.ib() - - @property - def result(self): - if self.excinfo is not None: - raise AttributeError("{!r} has no valid result".format(self)) - return self._result - - @classmethod - def from_call(cls, func, when, reraise=None): - #: context of invocation: one of "setup", "call", - #: "teardown", "memocollect" - start = time() - excinfo = None - try: - result = func() - except: # noqa - excinfo = ExceptionInfo.from_current() - if reraise is not None and excinfo.errisinstance(reraise): - raise - result = None - stop = time() - return cls(start=start, stop=stop, when=when, result=result, excinfo=excinfo) - - def __repr__(self): - if self.excinfo is not None: - status = "exception" - value = self.excinfo.value - else: - # TODO: investigate unification - value = repr(self._result) - status = "result" - return "<CallInfo when={when!r} {status}: {value}>".format( - when=self.when, value=safe_str(value), status=status - ) - - -def pytest_runtest_makereport(item, call): - return TestReport.from_item_and_call(item, call) - - -def pytest_make_collect_report(collector): - call = CallInfo.from_call(lambda: list(collector.collect()), "collect") - longrepr = None - if not call.excinfo: - outcome = "passed" - else: - from _pytest import nose - - skip_exceptions = (Skipped,) + nose.get_skip_exceptions() - if call.excinfo.errisinstance(skip_exceptions): - outcome = "skipped" - r = collector._repr_failure_py(call.excinfo, "line").reprcrash - longrepr = (str(r.path), r.lineno, r.message) - else: - outcome = "failed" - errorinfo = collector.repr_failure(call.excinfo) - if not hasattr(errorinfo, "toterminal"): - errorinfo = CollectErrorRepr(errorinfo) - longrepr = errorinfo - rep = CollectReport( - collector.nodeid, outcome, longrepr, getattr(call, "result", None) - ) - 
rep.call = call # see collect_one_node - return rep - - -class SetupState(object): - """ shared state for setting up/tearing down test items or collectors. """ - - def __init__(self): - self.stack = [] - self._finalizers = {} - - def addfinalizer(self, finalizer, colitem): - """ attach a finalizer to the given colitem. - if colitem is None, this will add a finalizer that - is called at the end of teardown_all(). - """ - assert colitem and not isinstance(colitem, tuple) - assert callable(finalizer) - # assert colitem in self.stack # some unit tests don't setup stack :/ - self._finalizers.setdefault(colitem, []).append(finalizer) - - def _pop_and_teardown(self): - colitem = self.stack.pop() - self._teardown_with_finalization(colitem) - - def _callfinalizers(self, colitem): - finalizers = self._finalizers.pop(colitem, None) - exc = None - while finalizers: - fin = finalizers.pop() - try: - fin() - except TEST_OUTCOME: - # XXX Only first exception will be seen by user, - # ideally all should be reported. - if exc is None: - exc = sys.exc_info() - if exc: - six.reraise(*exc) - - def _teardown_with_finalization(self, colitem): - self._callfinalizers(colitem) - if hasattr(colitem, "teardown"): - colitem.teardown() - for colitem in self._finalizers: - assert ( - colitem is None or colitem in self.stack or isinstance(colitem, tuple) - ) - - def teardown_all(self): - while self.stack: - self._pop_and_teardown() - for key in list(self._finalizers): - self._teardown_with_finalization(key) - assert not self._finalizers - - def teardown_exact(self, item, nextitem): - needed_collectors = nextitem and nextitem.listchain() or [] - self._teardown_towards(needed_collectors) - - def _teardown_towards(self, needed_collectors): - exc = None - while self.stack: - if self.stack == needed_collectors[: len(self.stack)]: - break - try: - self._pop_and_teardown() - except TEST_OUTCOME: - # XXX Only first exception will be seen by user, - # ideally all should be reported. 
- if exc is None: - exc = sys.exc_info() - if exc: - six.reraise(*exc) - - def prepare(self, colitem): - """ setup objects along the collector chain to the test-method - and teardown previously setup objects.""" - needed_collectors = colitem.listchain() - self._teardown_towards(needed_collectors) - - # check if the last collection node has raised an error - for col in self.stack: - if hasattr(col, "_prepare_exc"): - six.reraise(*col._prepare_exc) - for col in needed_collectors[len(self.stack) :]: - self.stack.append(col) - try: - col.setup() - except TEST_OUTCOME: - col._prepare_exc = sys.exc_info() - raise - - -def collect_one_node(collector): - ihook = collector.ihook - ihook.pytest_collectstart(collector=collector) - rep = ihook.pytest_make_collect_report(collector=collector) - call = rep.__dict__.pop("call", None) - if call and check_interactive_exception(call, rep): - ihook.pytest_exception_interact(node=collector, call=call, report=rep) - return rep diff --git a/contrib/python/pytest/py2/_pytest/setuponly.py b/contrib/python/pytest/py2/_pytest/setuponly.py deleted file mode 100644 index 0859011241..0000000000 --- a/contrib/python/pytest/py2/_pytest/setuponly.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--setuponly", - "--setup-only", - action="store_true", - help="only setup fixtures, do not execute tests.", - ) - group.addoption( - "--setupshow", - "--setup-show", - action="store_true", - help="show setup of fixtures while executing tests.", - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_fixture_setup(fixturedef, request): - yield - config = request.config - if config.option.setupshow: - if hasattr(request, "param"): - # Save the fixture parameter so ._show_fixture_action() can - # display it now and during the teardown (in .finish()). 
- if fixturedef.ids: - if callable(fixturedef.ids): - fixturedef.cached_param = fixturedef.ids(request.param) - else: - fixturedef.cached_param = fixturedef.ids[request.param_index] - else: - fixturedef.cached_param = request.param - _show_fixture_action(fixturedef, "SETUP") - - -def pytest_fixture_post_finalizer(fixturedef): - if hasattr(fixturedef, "cached_result"): - config = fixturedef._fixturemanager.config - if config.option.setupshow: - _show_fixture_action(fixturedef, "TEARDOWN") - if hasattr(fixturedef, "cached_param"): - del fixturedef.cached_param - - -def _show_fixture_action(fixturedef, msg): - config = fixturedef._fixturemanager.config - capman = config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture() - out, err = capman.read_global_capture() - - tw = config.get_terminal_writer() - tw.line() - tw.write(" " * 2 * fixturedef.scopenum) - tw.write( - "{step} {scope} {fixture}".format( - step=msg.ljust(8), # align the output to TEARDOWN - scope=fixturedef.scope[0].upper(), - fixture=fixturedef.argname, - ) - ) - - if msg == "SETUP": - deps = sorted(arg for arg in fixturedef.argnames if arg != "request") - if deps: - tw.write(" (fixtures used: {})".format(", ".join(deps))) - - if hasattr(fixturedef, "cached_param"): - tw.write("[{}]".format(fixturedef.cached_param)) - - if capman: - capman.resume_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - -@pytest.hookimpl(tryfirst=True) -def pytest_cmdline_main(config): - if config.option.setuponly: - config.option.setupshow = True diff --git a/contrib/python/pytest/py2/_pytest/setupplan.py b/contrib/python/pytest/py2/_pytest/setupplan.py deleted file mode 100644 index 47b0fe82ef..0000000000 --- a/contrib/python/pytest/py2/_pytest/setupplan.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--setupplan", - "--setup-plan", - action="store_true", - help="show what fixtures and tests would be executed but " - "don't execute anything.", - ) - - -@pytest.hookimpl(tryfirst=True) -def pytest_fixture_setup(fixturedef, request): - # Will return a dummy fixture if the setuponly option is provided. - if request.config.option.setupplan: - fixturedef.cached_result = (None, None, None) - return fixturedef.cached_result - - -@pytest.hookimpl(tryfirst=True) -def pytest_cmdline_main(config): - if config.option.setupplan: - config.option.setuponly = True - config.option.setupshow = True diff --git a/contrib/python/pytest/py2/_pytest/skipping.py b/contrib/python/pytest/py2/_pytest/skipping.py deleted file mode 100644 index bc8b88e717..0000000000 --- a/contrib/python/pytest/py2/_pytest/skipping.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -""" support for skip/xfail functions and markers. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from _pytest.config import hookimpl -from _pytest.mark.evaluate import MarkEvaluator -from _pytest.outcomes import fail -from _pytest.outcomes import skip -from _pytest.outcomes import xfail - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--runxfail", - action="store_true", - dest="runxfail", - default=False, - help="report the results of xfail tests as if they were not marked", - ) - - parser.addini( - "xfail_strict", - "default for the strict parameter of xfail " - "markers when not given explicitly (default: False)", - default=False, - type="bool", - ) - - -def pytest_configure(config): - if config.option.runxfail: - # yay a hack - import pytest - - old = pytest.xfail - config._cleanup.append(lambda: setattr(pytest, "xfail", old)) - - def nop(*args, **kwargs): - pass - - nop.Exception = xfail.Exception - setattr(pytest, "xfail", nop) - - config.addinivalue_line( - "markers", - "skip(reason=None): skip the given test function with an optional reason. " - 'Example: skip(reason="no way of currently testing this") skips the ' - "test.", - ) - config.addinivalue_line( - "markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True value. Evaluation happens within the " - "module global context. Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "https://docs.pytest.org/en/latest/skipping.html", - ) - config.addinivalue_line( - "markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " - "and run=False if you don't even want to execute the test function. " - "If only specific exception(s) are expected, you can list them in " - "raises, and if the test fails in other ways, it will be reported as " - "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", - ) - - -@hookimpl(tryfirst=True) -def pytest_runtest_setup(item): - # Check if skip or skipif are specified as pytest marks - item._skipped_by_mark = False - eval_skipif = MarkEvaluator(item, "skipif") - if eval_skipif.istrue(): - item._skipped_by_mark = True - skip(eval_skipif.getexplanation()) - - for skip_info in item.iter_markers(name="skip"): - item._skipped_by_mark = True - if "reason" in skip_info.kwargs: - skip(skip_info.kwargs["reason"]) - elif skip_info.args: - skip(skip_info.args[0]) - else: - skip("unconditional skip") - - item._evalxfail = MarkEvaluator(item, "xfail") - check_xfail_no_run(item) - - -@hookimpl(hookwrapper=True) -def pytest_pyfunc_call(pyfuncitem): - check_xfail_no_run(pyfuncitem) - outcome = yield - passed = outcome.excinfo is None - if passed: - check_strict_xfail(pyfuncitem) - - -def check_xfail_no_run(item): - """check xfail(run=False)""" - if not item.config.option.runxfail: - evalxfail = item._evalxfail - if evalxfail.istrue(): - if not evalxfail.get("run", True): - xfail("[NOTRUN] " + evalxfail.getexplanation()) - - -def check_strict_xfail(pyfuncitem): - """check xfail(strict=True) for the given PASSING test""" - evalxfail = pyfuncitem._evalxfail - if evalxfail.istrue(): - strict_default = pyfuncitem.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - if is_strict_xfail: - del pyfuncitem._evalxfail - explanation = evalxfail.getexplanation() - fail("[XPASS(strict)] " + explanation, pytrace=False) - - -@hookimpl(hookwrapper=True) -def pytest_runtest_makereport(item, call): - outcome = yield - rep = outcome.get_result() - evalxfail = getattr(item, "_evalxfail", None) - # unitttest special case, see setting of _unexpectedsuccess - if hasattr(item, "_unexpectedsuccess") and rep.when == "call": - from _pytest.compat import _is_unittest_unexpected_success_a_failure - - if item._unexpectedsuccess: - rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) - else: - rep.longrepr = "Unexpected success" - if _is_unittest_unexpected_success_a_failure(): - rep.outcome = "failed" - else: - rep.outcome = "passed" - rep.wasxfail = rep.longrepr - elif item.config.option.runxfail: - pass # don't interefere - elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): - rep.wasxfail = "reason: " + call.excinfo.value.msg - rep.outcome = "skipped" - elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): - if call.excinfo: - if evalxfail.invalidraise(call.excinfo.value): - rep.outcome = "failed" - else: - rep.outcome = "skipped" - rep.wasxfail = evalxfail.getexplanation() - elif call.when == "call": - strict_default = item.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - explanation = evalxfail.getexplanation() - if is_strict_xfail: - rep.outcome = "failed" - rep.longrepr = "[XPASS(strict)] {}".format(explanation) - else: - rep.outcome = "passed" - rep.wasxfail = explanation - elif ( - getattr(item, "_skipped_by_mark", False) - and rep.skipped - and type(rep.longrepr) is tuple - ): - # skipped by mark.skipif; change the location of the failure - # to point to the item definition, otherwise it will display - # the location of where the skip exception was raised within pytest - filename, line, reason = rep.longrepr - filename, line = item.location[:2] - rep.longrepr = filename, line, reason - - -# called by terminalreporter progress reporting - - -def pytest_report_teststatus(report): - if 
hasattr(report, "wasxfail"): - if report.skipped: - return "xfailed", "x", "XFAIL" - elif report.passed: - return "xpassed", "X", "XPASS" diff --git a/contrib/python/pytest/py2/_pytest/stepwise.py b/contrib/python/pytest/py2/_pytest/stepwise.py deleted file mode 100644 index 8890259589..0000000000 --- a/contrib/python/pytest/py2/_pytest/stepwise.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--sw", - "--stepwise", - action="store_true", - dest="stepwise", - help="exit on test failure and continue from last failing test next time", - ) - group.addoption( - "--stepwise-skip", - action="store_true", - dest="stepwise_skip", - help="ignore the first failing test but stop on the next failing test", - ) - - -@pytest.hookimpl -def pytest_configure(config): - config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") - - -class StepwisePlugin: - def __init__(self, config): - self.config = config - self.active = config.getvalue("stepwise") - self.session = None - self.report_status = "" - - if self.active: - self.lastfailed = config.cache.get("cache/stepwise", None) - self.skip = config.getvalue("stepwise_skip") - - def pytest_sessionstart(self, session): - self.session = session - - def pytest_collection_modifyitems(self, session, config, items): - if not self.active: - return - if not self.lastfailed: - self.report_status = "no previously failed tests, not skipping." - return - - already_passed = [] - found = False - - # Make a list of all tests that have been run before the last failing one. - for item in items: - if item.nodeid == self.lastfailed: - found = True - break - else: - already_passed.append(item) - - # If the previously failed test was not found among the test items, - # do not skip any tests. - if not found: - self.report_status = "previously failed test not found, not skipping." - already_passed = [] - else: - self.report_status = "skipping {} already passed items.".format( - len(already_passed) - ) - - for item in already_passed: - items.remove(item) - - config.hook.pytest_deselected(items=already_passed) - - def pytest_runtest_logreport(self, report): - if not self.active: - return - - if report.failed: - if self.skip: - # Remove test from the failed ones (if it exists) and unset the skip option - # to make sure the following tests will not be skipped. - if report.nodeid == self.lastfailed: - self.lastfailed = None - - self.skip = False - else: - # Mark test as the last failing and interrupt the test session. - self.lastfailed = report.nodeid - self.session.shouldstop = ( - "Test failed, continuing from this test next run." - ) - - else: - # If the test was actually run and did pass. - if report.when == "call": - # Remove test from the failed ones, if exists. - if report.nodeid == self.lastfailed: - self.lastfailed = None - - def pytest_report_collectionfinish(self): - if self.active and self.config.getoption("verbose") >= 0 and self.report_status: - return "stepwise: %s" % self.report_status - - def pytest_sessionfinish(self, session): - if self.active: - self.config.cache.set("cache/stepwise", self.lastfailed) - else: - # Clear the list of failing tests if the plugin is not active. 
- self.config.cache.set("cache/stepwise", []) diff --git a/contrib/python/pytest/py2/_pytest/terminal.py b/contrib/python/pytest/py2/_pytest/terminal.py deleted file mode 100644 index 4418338c65..0000000000 --- a/contrib/python/pytest/py2/_pytest/terminal.py +++ /dev/null @@ -1,1090 +0,0 @@ -# -*- coding: utf-8 -*- -""" terminal reporting of the full testing process. - -This is a good source for looking at the various reporting hooks. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import collections -import platform -import sys -import time -from functools import partial - -import attr -import pluggy -import py -import six -from more_itertools import collapse - -import pytest -from _pytest import nodes -from _pytest.main import EXIT_INTERRUPTED -from _pytest.main import EXIT_NOTESTSCOLLECTED -from _pytest.main import EXIT_OK -from _pytest.main import EXIT_TESTSFAILED -from _pytest.main import EXIT_USAGEERROR - -REPORT_COLLECTING_RESOLUTION = 0.5 - - -class MoreQuietAction(argparse.Action): - """ - a modified copy of the argparse count action which counts down and updates - the legacy quiet attribute at the same time - - used to unify verbosity handling - """ - - def __init__(self, option_strings, dest, default=None, required=False, help=None): - super(MoreQuietAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - default=default, - required=required, - help=help, - ) - - def __call__(self, parser, namespace, values, option_string=None): - new_count = getattr(namespace, self.dest, 0) - 1 - setattr(namespace, self.dest, new_count) - # todo Deprecate config.quiet - namespace.quiet = getattr(namespace, "quiet", 0) + 1 - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "reporting", after="general") - group._addoption( - "-v", - "--verbose", - action="count", - default=0, - dest="verbose", - help="increase verbosity.", - ), - group._addoption( - "-q", - "--quiet", - action=MoreQuietAction, - default=0, - dest="verbose", - help="decrease verbosity.", - ), - group._addoption( - "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" - ) - group._addoption( - "-r", - action="store", - dest="reportchars", - default="", - metavar="chars", - help="show extra test summary info as specified by chars: (f)ailed, " - "(E)rror, (s)kipped, (x)failed, (X)passed, " - "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " - "Warnings are displayed at all times except when " - "--disable-warnings is set.", - ) - group._addoption( - "--disable-warnings", - "--disable-pytest-warnings", - default=False, - dest="disable_warnings", - action="store_true", - help="disable warnings summary", - ) - group._addoption( - "-l", - "--showlocals", - action="store_true", - dest="showlocals", - default=False, - help="show locals in tracebacks (disabled by default).", - ) - group._addoption( - "--tb", - metavar="style", - action="store", - dest="tbstyle", - default="auto", - choices=["auto", "long", "short", "no", "line", "native"], - help="traceback print mode (auto/long/short/line/native/no).", - ) - group._addoption( - "--show-capture", - action="store", - dest="showcapture", - choices=["no", "stdout", "stderr", "log", "all"], - default="all", - help="Controls how captured stdout/stderr/log is shown on failed tests. 
" - "Default is 'all'.", - ) - group._addoption( - "--fulltrace", - "--full-trace", - action="store_true", - default=False, - help="don't cut any tracebacks (default is to cut).", - ) - group._addoption( - "--color", - metavar="color", - action="store", - dest="color", - default="auto", - choices=["yes", "no", "auto"], - help="color terminal output (yes/no/auto).", - ) - - parser.addini( - "console_output_style", - help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").', - default="progress", - ) - - -def pytest_configure(config): - reporter = TerminalReporter(config, sys.stdout) - config.pluginmanager.register(reporter, "terminalreporter") - if config.option.debug or config.option.traceconfig: - - def mywriter(tags, args): - msg = " ".join(map(str, args)) - reporter.write_line("[traceconfig] " + msg) - - config.trace.root.setprocessor("pytest:config", mywriter) - - -def getreportopt(config): - reportopts = "" - reportchars = config.option.reportchars - if not config.option.disable_warnings and "w" not in reportchars: - reportchars += "w" - elif config.option.disable_warnings and "w" in reportchars: - reportchars = reportchars.replace("w", "") - aliases = {"F", "S"} - for char in reportchars: - # handle old aliases - if char in aliases: - char = char.lower() - if char == "a": - reportopts = "sxXwEf" - elif char == "A": - reportopts = "PpsxXwEf" - break - elif char not in reportopts: - reportopts += char - return reportopts - - -@pytest.hookimpl(trylast=True) # after _pytest.runner -def pytest_report_teststatus(report): - letter = "F" - if report.passed: - letter = "." - elif report.skipped: - letter = "s" - - outcome = report.outcome - if report.when in ("collect", "setup", "teardown") and outcome == "failed": - outcome = "error" - letter = "E" - - return outcome, letter, outcome.upper() - - -@attr.s -class WarningReport(object): - """ - Simple structure to hold warnings information captured by ``pytest_warning_captured``. - - :ivar str message: user friendly message about the warning - :ivar str|None nodeid: node id that generated the warning (see ``get_location``). - :ivar tuple|py.path.local fslocation: - file system location of the source of the warning (see ``get_location``). - """ - - message = attr.ib() - nodeid = attr.ib(default=None) - fslocation = attr.ib(default=None) - count_towards_summary = True - - def get_location(self, config): - """ - Returns the more user-friendly information about the location - of a warning, or None. 
- """ - if self.nodeid: - return self.nodeid - if self.fslocation: - if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: - filename, linenum = self.fslocation[:2] - relpath = py.path.local(filename).relto(config.invocation_dir) - if not relpath: - relpath = str(filename) - return "%s:%s" % (relpath, linenum) - else: - return str(self.fslocation) - return None - - -class TerminalReporter(object): - def __init__(self, config, file=None): - import _pytest.config - - self.config = config - self._numcollected = 0 - self._session = None - self._showfspath = None - - self.stats = {} - self.startdir = config.invocation_dir - if file is None: - file = sys.stdout - self._tw = _pytest.config.create_terminal_writer(config, file) - # self.writer will be deprecated in pytest-3.4 - self.writer = self._tw - self._screen_width = self._tw.fullwidth - self.currentfspath = None - self.reportchars = getreportopt(config) - self.hasmarkup = self._tw.hasmarkup - self.isatty = file.isatty() - self._progress_nodeids_reported = set() - self._show_progress_info = self._determine_show_progress_info() - self._collect_report_last_write = None - - def _determine_show_progress_info(self): - """Return True if we should display progress information based on the current config""" - # do not show progress if we are not capturing output (#3038) - if self.config.getoption("capture", "no") == "no": - return False - # do not show progress if we are showing fixture setup/teardown - if self.config.getoption("setupshow", False): - return False - cfg = self.config.getini("console_output_style") - if cfg in ("progress", "count"): - return cfg - return False - - @property - def verbosity(self): - return self.config.option.verbose - - @property - def showheader(self): - return self.verbosity >= 0 - - @property - def showfspath(self): - if self._showfspath is None: - return self.verbosity >= 0 - return self._showfspath - - @showfspath.setter - def showfspath(self, value): - self._showfspath = value - - @property - def showlongtestinfo(self): - return self.verbosity > 0 - - def hasopt(self, char): - char = {"xfailed": "x", "skipped": "s"}.get(char, char) - return char in self.reportchars - - def write_fspath_result(self, nodeid, res, **markup): - fspath = self.config.rootdir.join(nodeid.split("::")[0]) - # NOTE: explicitly check for None to work around py bug, and for less - # overhead in general (https://github.com/pytest-dev/py/pull/207). - if self.currentfspath is None or fspath != self.currentfspath: - if self.currentfspath is not None and self._show_progress_info: - self._write_progress_information_filling_space() - self.currentfspath = fspath - fspath = self.startdir.bestrelpath(fspath) - self._tw.line() - self._tw.write(fspath + " ") - self._tw.write(res, **markup) - - def write_ensure_prefix(self, prefix, extra="", **kwargs): - if self.currentfspath != prefix: - self._tw.line() - self.currentfspath = prefix - self._tw.write(prefix) - if extra: - self._tw.write(extra, **kwargs) - self.currentfspath = -2 - - def ensure_newline(self): - if self.currentfspath: - self._tw.line() - self.currentfspath = None - - def write(self, content, **markup): - self._tw.write(content, **markup) - - def write_line(self, line, **markup): - if not isinstance(line, six.text_type): - line = six.text_type(line, errors="replace") - self.ensure_newline() - self._tw.line(line, **markup) - - def rewrite(self, line, **markup): - """ - Rewinds the terminal cursor to the beginning and writes the given line. 
- - :kwarg erase: if True, will also add spaces until the full terminal width to ensure - previous lines are properly erased. - - The rest of the keyword arguments are markup instructions. - """ - erase = markup.pop("erase", False) - if erase: - fill_count = self._tw.fullwidth - len(line) - 1 - fill = " " * fill_count - else: - fill = "" - line = str(line) - self._tw.write("\r" + line + fill, **markup) - - def write_sep(self, sep, title=None, **markup): - self.ensure_newline() - self._tw.sep(sep, title, **markup) - - def section(self, title, sep="=", **kw): - self._tw.sep(sep, title, **kw) - - def line(self, msg, **kw): - self._tw.line(msg, **kw) - - def pytest_internalerror(self, excrepr): - for line in six.text_type(excrepr).split("\n"): - self.write_line("INTERNALERROR> " + line) - return 1 - - def pytest_warning_captured(self, warning_message, item): - # from _pytest.nodes import get_fslocation_from_item - from _pytest.warnings import warning_record_to_str - - warnings = self.stats.setdefault("warnings", []) - fslocation = warning_message.filename, warning_message.lineno - message = warning_record_to_str(warning_message) - - nodeid = item.nodeid if item is not None else "" - warning_report = WarningReport( - fslocation=fslocation, message=message, nodeid=nodeid - ) - warnings.append(warning_report) - - def pytest_plugin_registered(self, plugin): - if self.config.option.traceconfig: - msg = "PLUGIN registered: %s" % (plugin,) - # XXX this event may happen during setup/teardown time - # which unfortunately captures our output here - # which garbles our output if we use self.write_line - self.write_line(msg) - - def pytest_deselected(self, items): - self.stats.setdefault("deselected", []).extend(items) - - def pytest_runtest_logstart(self, nodeid, location): - # ensure that the path is printed before the - # 1st test of a module starts running - if self.showlongtestinfo: - line = self._locationline(nodeid, *location) - self.write_ensure_prefix(line, "") - elif self.showfspath: - fsid = nodeid.split("::")[0] - self.write_fspath_result(fsid, "") - - def pytest_runtest_logreport(self, report): - self._tests_ran = True - rep = report - res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) - category, letter, word = res - if isinstance(word, tuple): - word, markup = word - else: - markup = None - self.stats.setdefault(category, []).append(rep) - if not letter and not word: - # probably passed setup/teardown - return - running_xdist = hasattr(rep, "node") - if markup is None: - was_xfail = hasattr(report, "wasxfail") - if rep.passed and not was_xfail: - markup = {"green": True} - elif rep.passed and was_xfail: - markup = {"yellow": True} - elif rep.failed: - markup = {"red": True} - elif rep.skipped: - markup = {"yellow": True} - else: - markup = {} - if self.verbosity <= 0: - if not running_xdist and self.showfspath: - self.write_fspath_result(rep.nodeid, letter, **markup) - else: - self._tw.write(letter, **markup) - else: - self._progress_nodeids_reported.add(rep.nodeid) - line = self._locationline(rep.nodeid, *rep.location) - if not running_xdist: - self.write_ensure_prefix(line, word, **markup) - if self._show_progress_info: - self._write_progress_information_filling_space() - else: - self.ensure_newline() - self._tw.write("[%s]" % rep.node.gateway.id) - if self._show_progress_info: - self._tw.write( - self._get_progress_information_message() + " ", cyan=True - ) - else: - self._tw.write(" ") - self._tw.write(word, **markup) - self._tw.write(" " + line) - 
self.currentfspath = -2 - - def pytest_runtest_logfinish(self, nodeid): - if self.verbosity <= 0 and self._show_progress_info: - if self._show_progress_info == "count": - num_tests = self._session.testscollected - progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) - else: - progress_length = len(" [100%]") - - self._progress_nodeids_reported.add(nodeid) - is_last_item = ( - len(self._progress_nodeids_reported) == self._session.testscollected - ) - if is_last_item: - self._write_progress_information_filling_space() - else: - w = self._width_of_current_line - past_edge = w + progress_length + 1 >= self._screen_width - if past_edge: - msg = self._get_progress_information_message() - self._tw.write(msg + "\n", cyan=True) - - def _get_progress_information_message(self): - collected = self._session.testscollected - if self._show_progress_info == "count": - if collected: - progress = self._progress_nodeids_reported - counter_format = "{{:{}d}}".format(len(str(collected))) - format_string = " [{}/{{}}]".format(counter_format) - return format_string.format(len(progress), collected) - return " [ {} / {} ]".format(collected, collected) - else: - if collected: - progress = len(self._progress_nodeids_reported) * 100 // collected - return " [{:3d}%]".format(progress) - return " [100%]" - - def _write_progress_information_filling_space(self): - msg = self._get_progress_information_message() - w = self._width_of_current_line - fill = self._tw.fullwidth - w - 1 - self.write(msg.rjust(fill), cyan=True) - - @property - def _width_of_current_line(self): - """Return the width of current line, using the superior implementation of py-1.6 when available""" - try: - return self._tw.width_of_current_line - except AttributeError: - # py < 1.6.0 - return self._tw.chars_on_current_line - - def pytest_collection(self): - if self.isatty: - if self.config.option.verbose >= 0: - self.write("collecting ... ", bold=True) - self._collect_report_last_write = time.time() - elif self.config.option.verbose >= 1: - self.write("collecting ... ", bold=True) - - def pytest_collectreport(self, report): - if report.failed: - self.stats.setdefault("error", []).append(report) - elif report.skipped: - self.stats.setdefault("skipped", []).append(report) - items = [x for x in report.result if isinstance(x, pytest.Item)] - self._numcollected += len(items) - if self.isatty: - self.report_collect() - - def report_collect(self, final=False): - if self.config.option.verbose < 0: - return - - if not final: - # Only write "collecting" report every 0.5s. 
- t = time.time() - if ( - self._collect_report_last_write is not None - and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION - ): - return - self._collect_report_last_write = t - - errors = len(self.stats.get("error", [])) - skipped = len(self.stats.get("skipped", [])) - deselected = len(self.stats.get("deselected", [])) - selected = self._numcollected - errors - skipped - deselected - if final: - line = "collected " - else: - line = "collecting " - line += ( - str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") - ) - if errors: - line += " / %d errors" % errors - if deselected: - line += " / %d deselected" % deselected - if skipped: - line += " / %d skipped" % skipped - if self._numcollected > selected > 0: - line += " / %d selected" % selected - if self.isatty: - self.rewrite(line, bold=True, erase=True) - if final: - self.write("\n") - else: - self.write_line(line) - - @pytest.hookimpl(trylast=True) - def pytest_sessionstart(self, session): - self._session = session - self._sessionstarttime = time.time() - if not self.showheader: - return - self.write_sep("=", "test session starts", bold=True) - verinfo = platform.python_version() - msg = "platform %s -- Python %s" % (sys.platform, verinfo) - if hasattr(sys, "pypy_version_info"): - verinfo = ".".join(map(str, sys.pypy_version_info[:3])) - msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) - msg += ", pytest-%s, py-%s, pluggy-%s" % ( - pytest.__version__, - py.__version__, - pluggy.__version__, - ) - if ( - self.verbosity > 0 - or self.config.option.debug - or getattr(self.config.option, "pastebin", None) - ): - msg += " -- " + str(sys.executable) - self.write_line(msg) - lines = self.config.hook.pytest_report_header( - config=self.config, startdir=self.startdir - ) - self._write_report_lines_from_hooks(lines) - - def _write_report_lines_from_hooks(self, lines): - lines.reverse() - for line in collapse(lines): - self.write_line(line) - - def pytest_report_header(self, config): - line = "rootdir: %s" % config.rootdir - - if config.inifile: - line += ", inifile: " + config.rootdir.bestrelpath(config.inifile) - - testpaths = config.getini("testpaths") - if testpaths and config.args == testpaths: - rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths] - line += ", testpaths: {}".format(", ".join(rel_paths)) - result = [line] - - plugininfo = config.pluginmanager.list_plugin_distinfo() - if plugininfo: - result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) - return result - - def pytest_collection_finish(self, session): - self.report_collect(True) - - if self.config.getoption("collectonly"): - self._printcollecteditems(session.items) - - lines = self.config.hook.pytest_report_collectionfinish( - config=self.config, startdir=self.startdir, items=session.items - ) - self._write_report_lines_from_hooks(lines) - - if self.config.getoption("collectonly"): - if self.stats.get("failed"): - self._tw.sep("!", "collection failures") - for rep in self.stats.get("failed"): - rep.toterminal(self._tw) - - def _printcollecteditems(self, items): - # to print out items and their parent collectors - # we take care to leave out Instances aka () - # because later versions are going to get rid of them anyway - if self.config.option.verbose < 0: - if self.config.option.verbose < -1: - counts = {} - for item in items: - name = item.nodeid.split("::", 1)[0] - counts[name] = counts.get(name, 0) + 1 - for name, count in sorted(counts.items()): - self._tw.line("%s: %d" % (name, count)) 
- else: - for item in items: - self._tw.line(item.nodeid) - return - stack = [] - indent = "" - for item in items: - needed_collectors = item.listchain()[1:] # strip root node - while stack: - if stack == needed_collectors[: len(stack)]: - break - stack.pop() - for col in needed_collectors[len(stack) :]: - stack.append(col) - if col.name == "()": # Skip Instances. - continue - indent = (len(stack) - 1) * " " - self._tw.line("%s%s" % (indent, col)) - if self.config.option.verbose >= 1: - if hasattr(col, "_obj") and col._obj.__doc__: - for line in col._obj.__doc__.strip().splitlines(): - self._tw.line("%s%s" % (indent + " ", line.strip())) - - @pytest.hookimpl(hookwrapper=True) - def pytest_sessionfinish(self, exitstatus): - outcome = yield - outcome.get_result() - self._tw.line("") - summary_exit_codes = ( - EXIT_OK, - EXIT_TESTSFAILED, - EXIT_INTERRUPTED, - EXIT_USAGEERROR, - EXIT_NOTESTSCOLLECTED, - ) - if exitstatus in summary_exit_codes: - self.config.hook.pytest_terminal_summary( - terminalreporter=self, exitstatus=exitstatus, config=self.config - ) - if exitstatus == EXIT_INTERRUPTED: - self._report_keyboardinterrupt() - del self._keyboardinterrupt_memo - self.summary_stats() - - @pytest.hookimpl(hookwrapper=True) - def pytest_terminal_summary(self): - self.summary_errors() - self.summary_failures() - self.summary_warnings() - self.summary_passes() - yield - self.short_test_summary() - # Display any extra warnings from teardown here (if any). - self.summary_warnings() - - def pytest_keyboard_interrupt(self, excinfo): - self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) - - def pytest_unconfigure(self): - if hasattr(self, "_keyboardinterrupt_memo"): - self._report_keyboardinterrupt() - - def _report_keyboardinterrupt(self): - excrepr = self._keyboardinterrupt_memo - msg = excrepr.reprcrash.message - self.write_sep("!", msg) - if "KeyboardInterrupt" in msg: - if self.config.option.fulltrace: - excrepr.toterminal(self._tw) - else: - excrepr.reprcrash.toterminal(self._tw) - self._tw.line( - "(to show a full traceback on KeyboardInterrupt use --fulltrace)", - yellow=True, - ) - - def _locationline(self, nodeid, fspath, lineno, domain): - def mkrel(nodeid): - line = self.config.cwd_relative_nodeid(nodeid) - if domain and line.endswith(domain): - line = line[: -len(domain)] - values = domain.split("[") - values[0] = values[0].replace(".", "::") # don't replace '.' in params - line += "[".join(values) - return line - - # collect_fspath comes from testid which has a "/"-normalized path - - if fspath: - res = mkrel(nodeid) - if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( - "\\", nodes.SEP - ): - res += " <- " + self.startdir.bestrelpath(fspath) - else: - res = "[location]" - return res + " " - - def _getfailureheadline(self, rep): - head_line = rep.head_line - if head_line: - return head_line - return "test session" # XXX? 
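    # Illustrative sketch (hypothetical conftest.py values, not from this file): the
    # user-facing side of the header machinery above. Lines returned by this hook are
    # gathered by the pytest_report_header call in pytest_sessionstart and written out
    # through _write_report_lines_from_hooks.
    def pytest_report_header(config, startdir):
        # A hook implementation may return a single string or a list of strings;
        # each entry is printed as one line of the session header.
        return ["project: example-app", "invocation dir: %s" % startdir]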
- - def _getcrashline(self, rep): - try: - return str(rep.longrepr.reprcrash) - except AttributeError: - try: - return str(rep.longrepr)[:50] - except AttributeError: - return "" - - # - # summaries for sessionfinish - # - def getreports(self, name): - values = [] - for x in self.stats.get(name, []): - if not hasattr(x, "_pdbshown"): - values.append(x) - return values - - def summary_warnings(self): - if self.hasopt("w"): - all_warnings = self.stats.get("warnings") - if not all_warnings: - return - - final = hasattr(self, "_already_displayed_warnings") - if final: - warning_reports = all_warnings[self._already_displayed_warnings :] - else: - warning_reports = all_warnings - self._already_displayed_warnings = len(warning_reports) - if not warning_reports: - return - - reports_grouped_by_message = collections.OrderedDict() - for wr in warning_reports: - reports_grouped_by_message.setdefault(wr.message, []).append(wr) - - title = "warnings summary (final)" if final else "warnings summary" - self.write_sep("=", title, yellow=True, bold=False) - for message, warning_reports in reports_grouped_by_message.items(): - has_any_location = False - for w in warning_reports: - location = w.get_location(self.config) - if location: - self._tw.line(str(location)) - has_any_location = True - if has_any_location: - lines = message.splitlines() - indented = "\n".join(" " + x for x in lines) - message = indented.rstrip() - else: - message = message.rstrip() - self._tw.line(message) - self._tw.line() - self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html") - - def summary_passes(self): - if self.config.option.tbstyle != "no": - if self.hasopt("P"): - reports = self.getreports("passed") - if not reports: - return - self.write_sep("=", "PASSES") - for rep in reports: - if rep.sections: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg, green=True, bold=True) - self._outrep_summary(rep) - - def print_teardown_sections(self, rep): - showcapture = self.config.option.showcapture - if showcapture == "no": - return - for secname, content in rep.sections: - if showcapture != "all" and showcapture not in secname: - continue - if "teardown" in secname: - self._tw.sep("-", secname) - if content[-1:] == "\n": - content = content[:-1] - self._tw.line(content) - - def summary_failures(self): - if self.config.option.tbstyle != "no": - reports = self.getreports("failed") - if not reports: - return - self.write_sep("=", "FAILURES") - if self.config.option.tbstyle == "line": - for rep in reports: - line = self._getcrashline(rep) - self.write_line(line) - else: - teardown_sections = {} - for report in self.getreports(""): - if report.when == "teardown": - teardown_sections.setdefault(report.nodeid, []).append(report) - - for rep in reports: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg, red=True, bold=True) - self._outrep_summary(rep) - for report in teardown_sections.get(rep.nodeid, []): - self.print_teardown_sections(report) - - def summary_errors(self): - if self.config.option.tbstyle != "no": - reports = self.getreports("error") - if not reports: - return - self.write_sep("=", "ERRORS") - for rep in self.stats["error"]: - msg = self._getfailureheadline(rep) - if rep.when == "collect": - msg = "ERROR collecting " + msg - else: - msg = "ERROR at %s of %s" % (rep.when, msg) - self.write_sep("_", msg, red=True, bold=True) - self._outrep_summary(rep) - - def _outrep_summary(self, rep): - rep.toterminal(self._tw) - showcapture = self.config.option.showcapture - if showcapture == 
"no": - return - for secname, content in rep.sections: - if showcapture != "all" and showcapture not in secname: - continue - self._tw.sep("-", secname) - if content[-1:] == "\n": - content = content[:-1] - self._tw.line(content) - - def summary_stats(self): - session_duration = time.time() - self._sessionstarttime - (line, color) = build_summary_stats_line(self.stats) - msg = "%s in %.2f seconds" % (line, session_duration) - markup = {color: True, "bold": True} - - if self.verbosity >= 0: - self.write_sep("=", msg, **markup) - if self.verbosity == -1: - self.write_line(msg, **markup) - - def short_test_summary(self): - if not self.reportchars: - return - - def show_simple(stat, lines): - failed = self.stats.get(stat, []) - if not failed: - return - termwidth = self.writer.fullwidth - config = self.config - for rep in failed: - line = _get_line_with_reprcrash_message(config, rep, termwidth) - lines.append(line) - - def show_xfailed(lines): - xfailed = self.stats.get("xfailed", []) - for rep in xfailed: - verbose_word = rep._get_verbose_word(self.config) - pos = _get_pos(self.config, rep) - lines.append("%s %s" % (verbose_word, pos)) - reason = rep.wasxfail - if reason: - lines.append(" " + str(reason)) - - def show_xpassed(lines): - xpassed = self.stats.get("xpassed", []) - for rep in xpassed: - verbose_word = rep._get_verbose_word(self.config) - pos = _get_pos(self.config, rep) - reason = rep.wasxfail - lines.append("%s %s %s" % (verbose_word, pos, reason)) - - def show_skipped(lines): - skipped = self.stats.get("skipped", []) - fskips = _folded_skips(skipped) if skipped else [] - if not fskips: - return - verbose_word = skipped[0]._get_verbose_word(self.config) - for num, fspath, lineno, reason in fskips: - if reason.startswith("Skipped: "): - reason = reason[9:] - if lineno is not None: - lines.append( - "%s [%d] %s:%d: %s" - % (verbose_word, num, fspath, lineno + 1, reason) - ) - else: - lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) - - REPORTCHAR_ACTIONS = { - "x": show_xfailed, - "X": show_xpassed, - "f": partial(show_simple, "failed"), - "s": show_skipped, - "p": partial(show_simple, "passed"), - "E": partial(show_simple, "error"), - } - - lines = [] - for char in self.reportchars: - action = REPORTCHAR_ACTIONS.get(char) - if action: # skipping e.g. "P" (passed with output) here. - action(lines) - - if lines: - self.write_sep("=", "short test summary info") - for line in lines: - self.write_line(line) - - -def _get_pos(config, rep): - nodeid = config.cwd_relative_nodeid(rep.nodeid) - return nodeid - - -def _get_line_with_reprcrash_message(config, rep, termwidth): - """Get summary line for a report, trying to add reprcrash message.""" - from wcwidth import wcswidth - - verbose_word = rep._get_verbose_word(config) - pos = _get_pos(config, rep) - - line = "%s %s" % (verbose_word, pos) - len_line = wcswidth(line) - ellipsis, len_ellipsis = "...", 3 - if len_line > termwidth - len_ellipsis: - # No space for an additional message. - return line - - try: - msg = rep.longrepr.reprcrash.message - except AttributeError: - pass - else: - # Only use the first line. 
- i = msg.find("\n") - if i != -1: - msg = msg[:i] - len_msg = wcswidth(msg) - - sep, len_sep = " - ", 3 - max_len_msg = termwidth - len_line - len_sep - if max_len_msg >= len_ellipsis: - if len_msg > max_len_msg: - max_len_msg -= len_ellipsis - msg = msg[:max_len_msg] - while wcswidth(msg) > max_len_msg: - msg = msg[:-1] - if six.PY2: - # on python 2 systems with narrow unicode compilation, trying to - # get a single character out of a multi-byte unicode character such as - # u'😄' will result in a High Surrogate (U+D83D) character, which is - # rendered as u'�'; in this case we just strip that character out as it - # serves no purpose being rendered - try: - surrogate = six.unichr(0xD83D) - msg = msg.rstrip(surrogate) - except ValueError: # pragma: no cover - # Jython cannot represent this lone surrogate at all (#5256): - # ValueError: unichr() arg is a lone surrogate in range - # (0xD800, 0xDFFF) (Jython UTF-16 encoding) - # ignore this case as it shouldn't appear in the string anyway - pass - msg += ellipsis - line += sep + msg - return line - - -def _folded_skips(skipped): - d = {} - for event in skipped: - key = event.longrepr - assert len(key) == 3, (event, key) - keywords = getattr(event, "keywords", {}) - # folding reports with global pytestmark variable - # this is workaround, because for now we cannot identify the scope of a skip marker - # TODO: revisit after marks scope would be fixed - if ( - event.when == "setup" - and "skip" in keywords - and "pytestmark" not in keywords - ): - key = (key[0], None, key[2]) - d.setdefault(key, []).append(event) - values = [] - for key, events in d.items(): - values.append((len(events),) + key) - return values - - -def build_summary_stats_line(stats): - known_types = ( - "failed passed skipped deselected xfailed xpassed warnings error".split() - ) - unknown_type_seen = False - for found_type in stats: - if found_type not in known_types: - if found_type: # setup/teardown reports have an empty key, ignore them - known_types.append(found_type) - unknown_type_seen = True - parts = [] - for key in known_types: - reports = stats.get(key, None) - if reports: - count = sum( - 1 for rep in reports if getattr(rep, "count_towards_summary", True) - ) - parts.append("%d %s" % (count, key)) - - if parts: - line = ", ".join(parts) - else: - line = "no tests ran" - - if "failed" in stats or "error" in stats: - color = "red" - elif "warnings" in stats or unknown_type_seen: - color = "yellow" - elif "passed" in stats: - color = "green" - else: - color = "yellow" - - return line, color - - -def _plugin_nameversions(plugininfo): - values = [] - for plugin, dist in plugininfo: - # gets us name and version! - name = "{dist.project_name}-{dist.version}".format(dist=dist) - # questionable convenience, but it keeps things short - if name.startswith("pytest-"): - name = name[7:] - # we decided to print python package names - # they can have more than one plugin - if name not in values: - values.append(name) - return values diff --git a/contrib/python/pytest/py2/_pytest/tmpdir.py b/contrib/python/pytest/py2/_pytest/tmpdir.py deleted file mode 100644 index a8a7037713..0000000000 --- a/contrib/python/pytest/py2/_pytest/tmpdir.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- -""" support for providing temporary directories to test functions. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import re -import tempfile -import warnings - -import attr -import py -import six - -import pytest -from .pathlib import ensure_reset_dir -from .pathlib import LOCK_TIMEOUT -from .pathlib import make_numbered_dir -from .pathlib import make_numbered_dir_with_cleanup -from .pathlib import Path -from _pytest.monkeypatch import MonkeyPatch - - -@attr.s -class TempPathFactory(object): - """Factory for temporary directories under the common base temp directory. - - The base directory can be configured using the ``--basetemp`` option.""" - - _given_basetemp = attr.ib( - # using os.path.abspath() to get absolute path instead of resolve() as it - # does not work the same in all platforms (see #4427) - # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012) - converter=attr.converters.optional( - lambda p: Path(os.path.abspath(six.text_type(p))) - ) - ) - _trace = attr.ib() - _basetemp = attr.ib(default=None) - - @classmethod - def from_config(cls, config): - """ - :param config: a pytest configuration - """ - return cls( - given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir") - ) - - def mktemp(self, basename, numbered=True): - """makes a temporary directory managed by the factory""" - if not numbered: - p = self.getbasetemp().joinpath(basename) - p.mkdir() - else: - p = make_numbered_dir(root=self.getbasetemp(), prefix=basename) - self._trace("mktemp", p) - return p - - def getbasetemp(self): - """ return base temporary directory. """ - if self._basetemp is not None: - return self._basetemp - - if self._given_basetemp is not None: - basetemp = self._given_basetemp - ensure_reset_dir(basetemp) - basetemp = basetemp.resolve() - else: - from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") - temproot = Path(from_env or tempfile.gettempdir()).resolve() - user = get_user() or "unknown" - # use a sub-directory in the temproot to speed-up - # make_numbered_dir() call - rootdir = temproot.joinpath("pytest-of-{}".format(user)) - rootdir.mkdir(exist_ok=True) - basetemp = make_numbered_dir_with_cleanup( - prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT - ) - assert basetemp is not None, basetemp - self._basetemp = t = basetemp - self._trace("new basetemp", t) - return t - - -@attr.s -class TempdirFactory(object): - """ - backward comptibility wrapper that implements - :class:``py.path.local`` for :class:``TempPathFactory`` - """ - - _tmppath_factory = attr.ib() - - def ensuretemp(self, string, dir=1): - """ (deprecated) return temporary directory path with - the given string as the trailing part. It is usually - better to use the 'tmpdir' function argument which - provides an empty unique-per-test-invocation directory - and is guaranteed to be empty. - """ - # py.log._apiwarn(">1.1", "use tmpdir function argument") - from .deprecated import PYTEST_ENSURETEMP - - warnings.warn(PYTEST_ENSURETEMP, stacklevel=2) - return self.getbasetemp().ensure(string, dir=dir) - - def mktemp(self, basename, numbered=True): - """Create a subdirectory of the base temporary directory and return it. - If ``numbered``, ensure the directory is unique by adding a number - prefix greater than any existing one. 
- """ - return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) - - def getbasetemp(self): - """backward compat wrapper for ``_tmppath_factory.getbasetemp``""" - return py.path.local(self._tmppath_factory.getbasetemp().resolve()) - - -def get_user(): - """Return the current user name, or None if getuser() does not work - in the current environment (see #1010). - """ - import getpass - - try: - return getpass.getuser() - except (ImportError, KeyError): - return None - - -def pytest_configure(config): - """Create a TempdirFactory and attach it to the config object. - - This is to comply with existing plugins which expect the handler to be - available at pytest_configure time, but ideally should be moved entirely - to the tmpdir_factory session fixture. - """ - mp = MonkeyPatch() - tmppath_handler = TempPathFactory.from_config(config) - t = TempdirFactory(tmppath_handler) - config._cleanup.append(mp.undo) - mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) - mp.setattr(config, "_tmpdirhandler", t, raising=False) - mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) - - -@pytest.fixture(scope="session") -def tmpdir_factory(request): - """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. - """ - return request.config._tmpdirhandler - - -@pytest.fixture(scope="session") -def tmp_path_factory(request): - """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. - """ - return request.config._tmp_path_factory - - -def _mk_tmp(request, factory): - name = request.node.name - name = re.sub(r"[\W]", "_", name) - MAXVAL = 30 - name = name[:MAXVAL] - return factory.mktemp(name, numbered=True) - - -@pytest.fixture -def tmpdir(tmp_path): - """Return a temporary directory path object - which is unique to each test function invocation, - created as a sub directory of the base temporary - directory. The returned object is a `py.path.local`_ - path object. - - .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - """ - return py.path.local(tmp_path) - - -@pytest.fixture -def tmp_path(request, tmp_path_factory): - """Return a temporary directory path object - which is unique to each test function invocation, - created as a sub directory of the base temporary - directory. The returned object is a :class:`pathlib.Path` - object. - - .. note:: - - in python < 3.6 this is a pathlib2.Path - """ - - return _mk_tmp(request, tmp_path_factory) diff --git a/contrib/python/pytest/py2/_pytest/unittest.py b/contrib/python/pytest/py2/_pytest/unittest.py deleted file mode 100644 index 3ff6f45d8d..0000000000 --- a/contrib/python/pytest/py2/_pytest/unittest.py +++ /dev/null @@ -1,291 +0,0 @@ -# -*- coding: utf-8 -*- -""" discovery and running of std-library "unittest" style tests. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import traceback - -import _pytest._code -import pytest -from _pytest.compat import getimfunc -from _pytest.config import hookimpl -from _pytest.outcomes import fail -from _pytest.outcomes import skip -from _pytest.outcomes import xfail -from _pytest.python import Class -from _pytest.python import Function - - -def pytest_pycollect_makeitem(collector, name, obj): - # has unittest been imported and is obj a subclass of its TestCase? 
- try: - if not issubclass(obj, sys.modules["unittest"].TestCase): - return - except Exception: - return - # yes, so let's collect it - return UnitTestCase(name, parent=collector) - - -class UnitTestCase(Class): - # marker for fixturemanger.getfixtureinfo() - # to declare that our children do not support funcargs - nofuncargs = True - - def collect(self): - from unittest import TestLoader - - cls = self.obj - if not getattr(cls, "__test__", True): - return - - skipped = getattr(cls, "__unittest_skip__", False) - if not skipped: - self._inject_setup_teardown_fixtures(cls) - self._inject_setup_class_fixture() - - self.session._fixturemanager.parsefactories(self, unittest=True) - loader = TestLoader() - foundsomething = False - for name in loader.getTestCaseNames(self.obj): - x = getattr(self.obj, name) - if not getattr(x, "__test__", True): - continue - funcobj = getimfunc(x) - yield TestCaseFunction(name, parent=self, callobj=funcobj) - foundsomething = True - - if not foundsomething: - runtest = getattr(self.obj, "runTest", None) - if runtest is not None: - ut = sys.modules.get("twisted.trial.unittest", None) - if ut is None or runtest != ut.TestCase.runTest: - yield TestCaseFunction("runTest", parent=self) - - def _inject_setup_teardown_fixtures(self, cls): - """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding - teardown functions (#517)""" - class_fixture = _make_xunit_fixture( - cls, "setUpClass", "tearDownClass", scope="class", pass_self=False - ) - if class_fixture: - cls.__pytest_class_setup = class_fixture - - method_fixture = _make_xunit_fixture( - cls, "setup_method", "teardown_method", scope="function", pass_self=True - ) - if method_fixture: - cls.__pytest_method_setup = method_fixture - - -def _make_xunit_fixture(obj, setup_name, teardown_name, scope, pass_self): - setup = getattr(obj, setup_name, None) - teardown = getattr(obj, teardown_name, None) - if setup is None and teardown is None: - return None - - @pytest.fixture(scope=scope, autouse=True) - def fixture(self, request): - if getattr(self, "__unittest_skip__", None): - reason = self.__unittest_skip_why__ - pytest.skip(reason) - if setup is not None: - if pass_self: - setup(self, request.function) - else: - setup() - yield - if teardown is not None: - if pass_self: - teardown(self, request.function) - else: - teardown() - - return fixture - - -class TestCaseFunction(Function): - nofuncargs = True - _excinfo = None - _testcase = None - - def setup(self): - self._testcase = self.parent.obj(self.name) - self._fix_unittest_skip_decorator() - self._obj = getattr(self._testcase, self.name) - if hasattr(self, "_request"): - self._request._fillfixtures() - - def _fix_unittest_skip_decorator(self): - """ - The @unittest.skip decorator calls functools.wraps(self._testcase) - The call to functools.wraps() fails unless self._testcase - has a __name__ attribute. This is usually automatically supplied - if the test is a function or method, but we need to add manually - here. 
- - See issue #1169 - """ - if sys.version_info[0] == 2: - setattr(self._testcase, "__name__", self.name) - - def teardown(self): - self._testcase = None - self._obj = None - - def startTest(self, testcase): - pass - - def _addexcinfo(self, rawexcinfo): - # unwrap potential exception info (see twisted trial support below) - rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) - try: - excinfo = _pytest._code.ExceptionInfo(rawexcinfo) - # invoke the attributes to trigger storing the traceback - # trial causes some issue there - excinfo.value - excinfo.traceback - except TypeError: - try: - try: - values = traceback.format_exception(*rawexcinfo) - values.insert( - 0, - "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n", - ) - fail("".join(values), pytrace=False) - except (fail.Exception, KeyboardInterrupt): - raise - except: # noqa - fail( - "ERROR: Unknown Incompatible Exception " - "representation:\n%r" % (rawexcinfo,), - pytrace=False, - ) - except KeyboardInterrupt: - raise - except fail.Exception: - excinfo = _pytest._code.ExceptionInfo.from_current() - self.__dict__.setdefault("_excinfo", []).append(excinfo) - - def addError(self, testcase, rawexcinfo): - self._addexcinfo(rawexcinfo) - - def addFailure(self, testcase, rawexcinfo): - self._addexcinfo(rawexcinfo) - - def addSkip(self, testcase, reason): - try: - skip(reason) - except skip.Exception: - self._skipped_by_mark = True - self._addexcinfo(sys.exc_info()) - - def addExpectedFailure(self, testcase, rawexcinfo, reason=""): - try: - xfail(str(reason)) - except xfail.Exception: - self._addexcinfo(sys.exc_info()) - - def addUnexpectedSuccess(self, testcase, reason=""): - self._unexpectedsuccess = reason - - def addSuccess(self, testcase): - pass - - def stopTest(self, testcase): - pass - - def _handle_skip(self): - # implements the skipping machinery (see #2137) - # analog to pythons Lib/unittest/case.py:run - testMethod = getattr(self._testcase, self._testcase._testMethodName) - if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr( - testMethod, "__unittest_skip__", False - ): - # If the class or method was skipped. 
- skip_why = getattr( - self._testcase.__class__, "__unittest_skip_why__", "" - ) or getattr(testMethod, "__unittest_skip_why__", "") - try: # PY3, unittest2 on PY2 - self._testcase._addSkip(self, self._testcase, skip_why) - except TypeError: # PY2 - if sys.version_info[0] != 2: - raise - self._testcase._addSkip(self, skip_why) - return True - return False - - def runtest(self): - if self.config.pluginmanager.get_plugin("pdbinvoke") is None: - self._testcase(result=self) - else: - # disables tearDown and cleanups for post mortem debugging (see #1890) - if self._handle_skip(): - return - self._testcase.debug() - - def _prunetraceback(self, excinfo): - Function._prunetraceback(self, excinfo) - traceback = excinfo.traceback.filter( - lambda x: not x.frame.f_globals.get("__unittest") - ) - if traceback: - excinfo.traceback = traceback - - -@hookimpl(tryfirst=True) -def pytest_runtest_makereport(item, call): - if isinstance(item, TestCaseFunction): - if item._excinfo: - call.excinfo = item._excinfo.pop(0) - try: - del call.result - except AttributeError: - pass - - -# twisted trial support - - -@hookimpl(hookwrapper=True) -def pytest_runtest_protocol(item): - if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: - ut = sys.modules["twisted.python.failure"] - Failure__init__ = ut.Failure.__init__ - check_testcase_implements_trial_reporter() - - def excstore( - self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None - ): - if exc_value is None: - self._rawexcinfo = sys.exc_info() - else: - if exc_type is None: - exc_type = type(exc_value) - self._rawexcinfo = (exc_type, exc_value, exc_tb) - try: - Failure__init__( - self, exc_value, exc_type, exc_tb, captureVars=captureVars - ) - except TypeError: - Failure__init__(self, exc_value, exc_type, exc_tb) - - ut.Failure.__init__ = excstore - yield - ut.Failure.__init__ = Failure__init__ - else: - yield - - -def check_testcase_implements_trial_reporter(done=[]): - if done: - return - from zope.interface import classImplements - from twisted.trial.itrial import IReporter - - classImplements(TestCaseFunction, IReporter) - done.append(1) diff --git a/contrib/python/pytest/py2/_pytest/warning_types.py b/contrib/python/pytest/py2/_pytest/warning_types.py deleted file mode 100644 index 861010a127..0000000000 --- a/contrib/python/pytest/py2/_pytest/warning_types.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -import attr - - -class PytestWarning(UserWarning): - """ - Bases: :class:`UserWarning`. - - Base class for all warnings emitted by pytest. - """ - - -class PytestAssertRewriteWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted by the pytest assert rewrite module. - """ - - -class PytestCacheWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted by the cache plugin in various situations. - """ - - -class PytestConfigWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted for configuration issues. - """ - - -class PytestCollectionWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted when pytest is not able to collect a file or symbol in a module. - """ - - -class PytestDeprecationWarning(PytestWarning, DeprecationWarning): - """ - Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. - - Warning class for features that will be removed in a future version. 
- """ - - -class PytestExperimentalApiWarning(PytestWarning, FutureWarning): - """ - Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. - - Warning category used to denote experiments in pytest. Use sparingly as the API might change or even be - removed completely in future version - """ - - @classmethod - def simple(cls, apiname): - return cls( - "{apiname} is an experimental api that may change over time".format( - apiname=apiname - ) - ) - - -class PytestUnhandledCoroutineWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted when pytest encounters a test function which is a coroutine, - but it was not handled by any async-aware plugin. Coroutine test functions - are not natively supported. - """ - - -class PytestUnknownMarkWarning(PytestWarning): - """ - Bases: :class:`PytestWarning`. - - Warning emitted on use of unknown markers. - See https://docs.pytest.org/en/latest/mark.html for details. - """ - - -class RemovedInPytest4Warning(PytestDeprecationWarning): - """ - Bases: :class:`pytest.PytestDeprecationWarning`. - - Warning class for features scheduled to be removed in pytest 4.0. - """ - - -@attr.s -class UnformattedWarning(object): - """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. - - Using this class avoids to keep all the warning types and messages in this module, avoiding misuse. - """ - - category = attr.ib() - template = attr.ib() - - def format(self, **kwargs): - """Returns an instance of the warning category, formatted with given kwargs""" - return self.category(self.template.format(**kwargs)) - - -PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example") diff --git a/contrib/python/pytest/py2/_pytest/warnings.py b/contrib/python/pytest/py2/_pytest/warnings.py deleted file mode 100644 index a3debae462..0000000000 --- a/contrib/python/pytest/py2/_pytest/warnings.py +++ /dev/null @@ -1,180 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import warnings -from contextlib import contextmanager - -import pytest -from _pytest import compat - -SHOW_PYTEST_WARNINGS_ARG = "-Walways::pytest.RemovedInPytest4Warning" - - -def _setoption(wmod, arg): - """ - Copy of the warning._setoption function but does not escape arguments. - """ - parts = arg.split(":") - if len(parts) > 5: - raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) - while len(parts) < 5: - parts.append("") - action, message, category, module, lineno = [s.strip() for s in parts] - action = wmod._getaction(action) - category = wmod._getcategory(category) - if lineno: - try: - lineno = int(lineno) - if lineno < 0: - raise ValueError - except (ValueError, OverflowError): - raise wmod._OptionError("invalid lineno %r" % (lineno,)) - else: - lineno = 0 - wmod.filterwarnings(action, message, category, module, lineno) - - -def pytest_addoption(parser): - group = parser.getgroup("pytest-warnings") - group.addoption( - "-W", - "--pythonwarnings", - action="append", - help="set which warnings to report, see -W option of python itself.", - ) - parser.addini( - "filterwarnings", - type="linelist", - help="Each line specifies a pattern for " - "warnings.filterwarnings. " - "Processed after -W and --pythonwarnings.", - ) - - -def pytest_configure(config): - config.addinivalue_line( - "markers", - "filterwarnings(warning): add a warning filter to the given test. 
" - "see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ", - ) - - -@contextmanager -def catch_warnings_for_item(config, ihook, when, item): - """ - Context manager that catches warnings generated in the contained execution block. - - ``item`` can be None if we are not in the context of an item execution. - - Each warning captured triggers the ``pytest_warning_captured`` hook. - """ - cmdline_filters = config.getoption("pythonwarnings") or [] - inifilters = config.getini("filterwarnings") - with warnings.catch_warnings(record=True) as log: - - if not sys.warnoptions: - # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908) - warnings.filterwarnings("always", category=DeprecationWarning) - warnings.filterwarnings("always", category=PendingDeprecationWarning) - - warnings.filterwarnings("error", category=pytest.RemovedInPytest4Warning) - - # filters should have this precedence: mark, cmdline options, ini - # filters should be applied in the inverse order of precedence - for arg in inifilters: - _setoption(warnings, arg) - - for arg in cmdline_filters: - warnings._setoption(arg) - - if item is not None: - for mark in item.iter_markers(name="filterwarnings"): - for arg in mark.args: - _setoption(warnings, arg) - - yield - - for warning_message in log: - ihook.pytest_warning_captured.call_historic( - kwargs=dict(warning_message=warning_message, when=when, item=item) - ) - - -def warning_record_to_str(warning_message): - """Convert a warnings.WarningMessage to a string. - - This takes lot of unicode shenaningans into account for Python 2. - When Python 2 support is dropped this function can be greatly simplified. - """ - warn_msg = warning_message.message - unicode_warning = False - if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): - new_args = [] - for m in warn_msg.args: - new_args.append( - compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m - ) - unicode_warning = list(warn_msg.args) != new_args - warn_msg.args = new_args - - msg = warnings.formatwarning( - warn_msg, - warning_message.category, - warning_message.filename, - warning_message.lineno, - warning_message.line, - ) - if unicode_warning: - warnings.warn( - "Warning is using unicode non convertible to ascii, " - "converting to a safe representation:\n {!r}".format(compat.safe_str(msg)), - UnicodeWarning, - ) - return msg - - -@pytest.hookimpl(hookwrapper=True, tryfirst=True) -def pytest_runtest_protocol(item): - with catch_warnings_for_item( - config=item.config, ihook=item.ihook, when="runtest", item=item - ): - yield - - -@pytest.hookimpl(hookwrapper=True, tryfirst=True) -def pytest_collection(session): - config = session.config - with catch_warnings_for_item( - config=config, ihook=config.hook, when="collect", item=None - ): - yield - - -@pytest.hookimpl(hookwrapper=True) -def pytest_terminal_summary(terminalreporter): - config = terminalreporter.config - with catch_warnings_for_item( - config=config, ihook=config.hook, when="config", item=None - ): - yield - - -def _issue_warning_captured(warning, hook, stacklevel): - """ - This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: - at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured - hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. - - :param warning: the warning instance. 
- :param hook: the hook caller - :param stacklevel: stacklevel forwarded to warnings.warn - """ - with warnings.catch_warnings(record=True) as records: - warnings.simplefilter("always", type(warning)) - warnings.warn(warning, stacklevel=stacklevel) - hook.pytest_warning_captured.call_historic( - kwargs=dict(warning_message=records[0], when="config", item=None) - )
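For reference, a minimal sketch of how the temporary-directory fixtures removed in tmpdir.py above are typically used; the test and fixture names are invented for illustration and are not taken from these sources:

    import pytest

    def test_write_report(tmp_path):
        # tmp_path is a pathlib.Path (pathlib2 on Python < 3.6), unique per
        # test invocation, created under the numbered base temp directory.
        report = tmp_path / "report.txt"
        report.write_text(u"ok")
        assert report.read_text() == u"ok"

    @pytest.fixture(scope="session")
    def data_dir(tmpdir_factory):
        # tmpdir_factory.mktemp() returns a py.path.local; with numbered=True
        # (the default) the directory name gets a numeric suffix such as "data0".
        d = tmpdir_factory.mktemp("data")
        d.join("config.ini").write("[defaults]\n")
        return d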
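The unittest.py chunk above collects stdlib unittest.TestCase subclasses through UnitTestCase and TestCaseFunction. A small, hypothetical example of the kind of class that code path picks up (not part of the deleted files):

    import unittest

    class TestMath(unittest.TestCase):
        # Collected via pytest_pycollect_makeitem(): each test_* method becomes
        # a TestCaseFunction item, and @unittest.skip is honoured through the
        # __unittest_skip__ handling shown in _handle_skip() above.
        def setUp(self):
            self.values = [1, 2, 3]

        def test_sum(self):
            self.assertEqual(sum(self.values), 6)

        @unittest.skip("demonstrates skip reporting")
        def test_skipped(self):
            self.fail("never runs")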
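warnings.py above routes warning filters from the filterwarnings ini option, -W/--pythonwarnings and the filterwarnings mark into each test, with mark filters applied last and therefore taking precedence. A hedged sketch of that behaviour from the test author's side (identifiers are illustrative only):

    import warnings
    import pytest

    @pytest.mark.filterwarnings("error::DeprecationWarning")
    def test_rejects_deprecated_calls():
        # Because the mark filter is applied after ini and -W filters, any
        # DeprecationWarning raised in this test escalates to a failure;
        # other warning categories are merely recorded.
        warnings.warn("harmless", UserWarning)

    def test_emits_expected_warning():
        # pytest.warns() records warnings raised in the block and fails the
        # test if no matching warning was emitted.
        with pytest.warns(DeprecationWarning, match="old_api"):
            warnings.warn("old_api is deprecated", DeprecationWarning)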