author     Devtools Arcadia <arcadia-devtools@yandex-team.ru>           2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net> 2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /library/python/pytest
download   ydb-1110808a9d39d4b808aef724c861a2e1a38d2a69.tar.gz

intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'library/python/pytest')
-rw-r--r--  library/python/pytest/__init__.py            |   0
-rw-r--r--  library/python/pytest/allure/conftest.py     |   8
-rw-r--r--  library/python/pytest/allure/ya.make         |  11
-rw-r--r--  library/python/pytest/context.py             |   1
-rw-r--r--  library/python/pytest/empty/main.c           |   7
-rw-r--r--  library/python/pytest/empty/ya.make          |  12
-rw-r--r--  library/python/pytest/main.py                | 116
-rw-r--r--  library/python/pytest/plugins/collection.py  | 128
-rw-r--r--  library/python/pytest/plugins/conftests.py   |  50
-rw-r--r--  library/python/pytest/plugins/fakeid_py2.py  |   2
-rw-r--r--  library/python/pytest/plugins/fakeid_py3.py  |   2
-rw-r--r--  library/python/pytest/plugins/fixtures.py    |  85
-rw-r--r--  library/python/pytest/plugins/ya.make        |  32
-rw-r--r--  library/python/pytest/plugins/ya.py          | 963
-rw-r--r--  library/python/pytest/pytest.yatest.ini      |   7
-rw-r--r--  library/python/pytest/rewrite.py             | 123
-rw-r--r--  library/python/pytest/ya.make                |  32
-rw-r--r--  library/python/pytest/yatest_tools.py        | 304

18 files changed, 1883 insertions, 0 deletions
diff --git a/library/python/pytest/__init__.py b/library/python/pytest/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/library/python/pytest/__init__.py
diff --git a/library/python/pytest/allure/conftest.py b/library/python/pytest/allure/conftest.py
new file mode 100644
index 0000000000..0d5cfda1e5
--- /dev/null
+++ b/library/python/pytest/allure/conftest.py
@@ -0,0 +1,8 @@
+import os
+import pytest
+
+
+@pytest.mark.tryfirst
+def pytest_configure(config):
+    if "ALLURE_REPORT_DIR" in os.environ:
+        config.option.allurereportdir = os.environ["ALLURE_REPORT_DIR"]
diff --git a/library/python/pytest/allure/ya.make b/library/python/pytest/allure/ya.make
new file mode 100644
index 0000000000..ab3f449c7f
--- /dev/null
+++ b/library/python/pytest/allure/ya.make
@@ -0,0 +1,11 @@
+PY23_LIBRARY()
+
+OWNER(exprmntr)
+
+PY_SRCS(conftest.py)
+
+PEERDIR(
+    contrib/python/pytest-allure-adaptor
+)
+
+END()
diff --git a/library/python/pytest/context.py b/library/python/pytest/context.py
new file mode 100644
index 0000000000..bfcdae50b5
--- /dev/null
+++ b/library/python/pytest/context.py
@@ -0,0 +1 @@
+Ctx = {}
diff --git a/library/python/pytest/empty/main.c b/library/python/pytest/empty/main.c
new file mode 100644
index 0000000000..9efa08162a
--- /dev/null
+++ b/library/python/pytest/empty/main.c
@@ -0,0 +1,7 @@
+/*
+to be used for build python tests in a stub binary for the case of using system python
+*/
+
+int main(void) {
+    return 0;
+}
diff --git a/library/python/pytest/empty/ya.make b/library/python/pytest/empty/ya.make
new file mode 100644
index 0000000000..8f0fa37e2a
--- /dev/null
+++ b/library/python/pytest/empty/ya.make
@@ -0,0 +1,12 @@
+LIBRARY()
+
+OWNER(
+    g:yatool
+    dmitko
+)
+
+SRCS(
+    main.c
+)
+
+END()
diff --git a/library/python/pytest/main.py b/library/python/pytest/main.py
new file mode 100644
index 0000000000..6296bd6f0f
--- /dev/null
+++ b/library/python/pytest/main.py
@@ -0,0 +1,116 @@
+import os
+import sys
+import time
+
+import __res
+
+FORCE_EXIT_TESTSFAILED_ENV = 'FORCE_EXIT_TESTSFAILED'
+
+
+def main():
+    import library.python.pytest.context as context
+    context.Ctx["YA_PYTEST_START_TIMESTAMP"] = time.time()
+
+    profile = None
+    if '--profile-pytest' in sys.argv:
+        sys.argv.remove('--profile-pytest')
+
+        import pstats
+        import cProfile
+        profile = cProfile.Profile()
+        profile.enable()
+
+    # Reset influencing env. vars
+    # For more info see library/python/testing/yatest_common/yatest/common/errors.py
+    if FORCE_EXIT_TESTSFAILED_ENV in os.environ:
+        del os.environ[FORCE_EXIT_TESTSFAILED_ENV]
+
+    if "Y_PYTHON_CLEAR_ENTRY_POINT" in os.environ:
+        if "Y_PYTHON_ENTRY_POINT" in os.environ:
+            del os.environ["Y_PYTHON_ENTRY_POINT"]
+        del os.environ["Y_PYTHON_CLEAR_ENTRY_POINT"]
+
+    listing_mode = '--collect-only' in sys.argv
+    yatest_runner = os.environ.get('YA_TEST_RUNNER') == '1'
+
+    import pytest
+
+    import library.python.pytest.plugins.collection as collection
+    import library.python.pytest.plugins.ya as ya
+    import library.python.pytest.plugins.conftests as conftests
+
+    import _pytest.assertion
+    from _pytest.monkeypatch import MonkeyPatch
+    from . import rewrite
+    m = MonkeyPatch()
+    m.setattr(_pytest.assertion.rewrite, "AssertionRewritingHook", rewrite.AssertionRewritingHook)
+
+    prefix = '__tests__.'
+
+    test_modules = [
+        name[len(prefix):] for name in sys.extra_modules
+        if name.startswith(prefix) and not name.endswith('.conftest')
+    ]
+
+    doctest_packages = __res.find("PY_DOCTEST_PACKAGES") or ""
+    if isinstance(doctest_packages, bytes):
+        doctest_packages = doctest_packages.decode('utf-8')
+    doctest_packages = doctest_packages.split()
+
+    def is_doctest_module(name):
+        for package in doctest_packages:
+            if name == package or name.startswith(str(package) + "."):
+                return True
+        return False
+
+    doctest_modules = [
+        name for name in sys.extra_modules
+        if is_doctest_module(name)
+    ]
+
+    def remove_user_site(paths):
+        site_paths = ('site-packages', 'site-python')
+
+        def is_site_path(path):
+            for p in site_paths:
+                if path.find(p) != -1:
+                    return True
+            return False
+
+        new_paths = list(paths)
+        for p in paths:
+            if is_site_path(p):
+                new_paths.remove(p)
+
+        return new_paths
+
+    sys.path = remove_user_site(sys.path)
+    rc = pytest.main(plugins=[
+        collection.CollectionPlugin(test_modules, doctest_modules),
+        ya,
+        conftests,
+    ])
+
+    if rc == 5:
+        # don't care about EXIT_NOTESTSCOLLECTED
+        rc = 0
+
+    if rc == 1 and yatest_runner and not listing_mode and not os.environ.get(FORCE_EXIT_TESTSFAILED_ENV) == '1':
+        # XXX it's place for future improvements
+        # Test wrapper should terminate with 0 exit code if there are common test failures
+        # and report it with trace-file machinery.
+        # However, there are several case when we don't want to suppress exit_code:
+        # - listing machinery doesn't use trace-file currently and rely on stdout and exit_code
+        # - RestartTestException and InfrastructureException required non-zero exit_code to be processes correctly
+        rc = 0
+
+    if profile:
+        profile.disable()
+        ps = pstats.Stats(profile, stream=sys.stderr).sort_stats('cumulative')
+        ps.print_stats()
+
+    sys.exit(rc)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/library/python/pytest/plugins/collection.py b/library/python/pytest/plugins/collection.py
new file mode 100644
index 0000000000..e36f47a78f
--- /dev/null
+++ b/library/python/pytest/plugins/collection.py
@@ -0,0 +1,128 @@
+import os
+import sys
+from six import reraise
+
+import py
+
+import pytest  # noqa
+import _pytest.python
+import _pytest.doctest
+import json
+import library.python.testing.filter.filter as test_filter
+
+
+class LoadedModule(_pytest.python.Module):
+    def __init__(self, parent, name, **kwargs):
+        self.name = name + '.py'
+        self.session = parent
+        self.parent = parent
+        self.config = parent.config
+        self.keywords = {}
+        self.own_markers = []
+        self.fspath = py.path.local()
+
+    @classmethod
+    def from_parent(cls, **kwargs):
+        namespace = kwargs.pop('namespace', True)
+        kwargs.setdefault('fspath', py.path.local())
+
+        loaded_module = getattr(super(LoadedModule, cls), 'from_parent', cls)(**kwargs)
+        loaded_module.namespace = namespace
+
+        return loaded_module
+
+    @property
+    def _nodeid(self):
+        if os.getenv('CONFTEST_LOAD_POLICY') == 'LOCAL':
+            return self._getobj().__file__
+        else:
+            return self.name
+
+    @property
+    def nodeid(self):
+        return self._nodeid
+
+    def _getobj(self):
+        module_name = self.name[:-len('.py')]
+        if self.namespace:
+            module_name = '__tests__.' + module_name
+        __import__(module_name)
+        return sys.modules[module_name]
+
+
+class DoctestModule(LoadedModule):
+
+    def collect(self):
+        import doctest
+
+        module = self._getobj()
+        # uses internal doctest module parsing mechanism
+        finder = doctest.DocTestFinder()
+        optionflags = _pytest.doctest.get_optionflags(self)
+        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags)
+
+        try:
+            for test in finder.find(module, self.name[:-len('.py')]):
+                if test.examples:  # skip empty doctests
+                    yield getattr(_pytest.doctest.DoctestItem, 'from_parent', _pytest.doctest.DoctestItem)(
+                        name=test.name,
+                        parent=self,
+                        runner=runner,
+                        dtest=test)
+        except Exception:
+            import logging
+            logging.exception('DoctestModule failed, probably you can add NO_DOCTESTS() macro to ya.make')
+            etype, exc, tb = sys.exc_info()
+            msg = 'DoctestModule failed, probably you can add NO_DOCTESTS() macro to ya.make'
+            reraise(etype, type(exc)('{}\n{}'.format(exc, msg)), tb)
+
+
+# NOTE: Since we are overriding collect method of pytest session, pytest hooks are not invoked during collection.
+def pytest_ignore_collect(module, session, filenames_from_full_filters, accept_filename_predicate):
+    if session.config.option.mode == 'list':
+        return not accept_filename_predicate(module.name)
+
+    if filenames_from_full_filters is not None and module.name not in filenames_from_full_filters:
+        return True
+
+    test_file_filter = getattr(session.config.option, 'test_file_filter', None)
+    if test_file_filter is None:
+        return False
+    if module.name != test_file_filter.replace('/', '.'):
+        return True
+    return False
+
+
+class CollectionPlugin(object):
+    def __init__(self, test_modules, doctest_modules):
+        self._test_modules = test_modules
+        self._doctest_modules = doctest_modules
+
+    def pytest_sessionstart(self, session):
+
+        def collect(*args, **kwargs):
+            accept_filename_predicate = test_filter.make_py_file_filter(session.config.option.test_filter)
+            full_test_names_file_path = session.config.option.test_list_path
+            filenames_filter = None
+
+            if full_test_names_file_path and os.path.exists(full_test_names_file_path):
+                with open(full_test_names_file_path, 'r') as afile:
+                    # in afile stored 2 dimensional array such that array[modulo_index] contains tests which should be run in this test suite
+                    full_names_filter = set(json.load(afile)[int(session.config.option.modulo_index)])
+                    filenames_filter = set(map(lambda x: x.split('::')[0], full_names_filter))
+
+            for test_module in self._test_modules:
+                module = LoadedModule.from_parent(name=test_module, parent=session)
+                if not pytest_ignore_collect(module, session, filenames_filter, accept_filename_predicate):
+                    yield module
+
+                if os.environ.get('YA_PYTEST_DISABLE_DOCTEST', 'no') == 'no':
+                    module = DoctestModule.from_parent(name=test_module, parent=session)
+                    if not pytest_ignore_collect(module, session, filenames_filter, accept_filename_predicate):
+                        yield module
+
+            if os.environ.get('YA_PYTEST_DISABLE_DOCTEST', 'no') == 'no':
+                for doctest_module in self._doctest_modules:
+                    yield DoctestModule.from_parent(name=doctest_module, parent=session, namespace=False)
+
+        session.collect = collect
diff --git a/library/python/pytest/plugins/conftests.py b/library/python/pytest/plugins/conftests.py
new file mode 100644
index 0000000000..522041f5a7
--- /dev/null
+++ b/library/python/pytest/plugins/conftests.py
@@ -0,0 +1,50 @@
+import os
+import importlib
+import sys
+import inspect
+
+from pytest import hookimpl
+
+from .fixtures import metrics, links  # noqa
+
+orig_getfile = inspect.getfile
+
+
+def getfile(object):
+    res = orig_getfile(object)
+    if inspect.ismodule(object):
+        if not res and getattr(object, '__orig_file__'):
+            res = object.__orig_file__
+    return res
+
+inspect.getfile = getfile
+conftest_modules = []
+
+
+@hookimpl(trylast=True)
+def pytest_load_initial_conftests(early_config, parser, args):
+    conftests = filter(lambda name: name.endswith(".conftest"), sys.extra_modules)
+
+    def conftest_key(name):
+        if not name.startswith("__tests__."):
+            # Make __tests__ come last
+            return "_." + name
+        return name
+
+    for name in sorted(conftests, key=conftest_key):
+        mod = importlib.import_module(name)
+        if os.getenv("CONFTEST_LOAD_POLICY") != "LOCAL":
+            mod.__orig_file__ = mod.__file__
+            mod.__file__ = ""
+        conftest_modules.append(mod)
+        early_config.pluginmanager.consider_conftest(mod)
+
+
+def getconftestmodules(*args, **kwargs):
+    return conftest_modules
+
+
+def pytest_sessionstart(session):
+    # Override filesystem based relevant conftest discovery on the call path
+    assert session.config.pluginmanager
+    session.config.pluginmanager._getconftestmodules = getconftestmodules
diff --git a/library/python/pytest/plugins/fakeid_py2.py b/library/python/pytest/plugins/fakeid_py2.py
new file mode 100644
index 0000000000..8b26148e2e
--- /dev/null
+++ b/library/python/pytest/plugins/fakeid_py2.py
@@ -0,0 +1,2 @@
+# Inc this number to change uid for every PYTEST() target
+fake_id = 0
diff --git a/library/python/pytest/plugins/fakeid_py3.py b/library/python/pytest/plugins/fakeid_py3.py
new file mode 100644
index 0000000000..247cc8b29d
--- /dev/null
+++ b/library/python/pytest/plugins/fakeid_py3.py
@@ -0,0 +1,2 @@
+# Inc this number to change uid for every PY3TEST() target
+fake_id = 10
diff --git a/library/python/pytest/plugins/fixtures.py b/library/python/pytest/plugins/fixtures.py
new file mode 100644
index 0000000000..6f7e0a27e4
--- /dev/null
+++ b/library/python/pytest/plugins/fixtures.py
@@ -0,0 +1,85 @@
+import os
+import pytest
+import six
+
+
+MAX_ALLOWED_LINKS_COUNT = 10
+
+
+@pytest.fixture
+def metrics(request):
+
+    class Metrics(object):
+        @classmethod
+        def set(cls, name, value):
+            assert len(name) <= 128, "Length of the metric name must less than 128"
+            assert type(value) in [int, float], "Metric value must be of type int or float"
+            test_name = request.node.nodeid
+            if test_name not in request.config.test_metrics:
+                request.config.test_metrics[test_name] = {}
+            request.config.test_metrics[test_name][name] = value
+
+        @classmethod
+        def set_benchmark(cls, benchmark_values):
+            # report of google has key 'benchmarks' which is a list of benchmark results
+            # yandex benchmark has key 'benchmark', which is a list of benchmark results
+            # use this to differentiate which kind of result it is
+            if 'benchmarks' in benchmark_values:
+                cls.set_gbenchmark(benchmark_values)
+            else:
+                cls.set_ybenchmark(benchmark_values)
+
+        @classmethod
+        def set_ybenchmark(cls, benchmark_values):
+            for benchmark in benchmark_values["benchmark"]:
+                name = benchmark["name"]
+                for key, value in six.iteritems(benchmark):
+                    if key != "name":
+                        cls.set("{}_{}".format(name, key), value)
+
+        @classmethod
+        def set_gbenchmark(cls, benchmark_values):
+            time_unit_multipliers = {"ns": 1, "us": 1000, "ms": 1000000}
+            time_keys = {"real_time", "cpu_time"}
+            ignore_keys = {"name", "run_name", "time_unit", "run_type", "repetition_index"}
+            for benchmark in benchmark_values["benchmarks"]:
+                name = benchmark["name"].replace('/', '_')  # ci does not work properly with '/' in metric name
+                time_unit_mult = time_unit_multipliers[benchmark.get("time_unit", "ns")]
+                for k, v in six.iteritems(benchmark):
+                    if k in time_keys:
+                        cls.set("{}_{}".format(name, k), v * time_unit_mult)
+                    elif k not in ignore_keys and isinstance(v, (float, int)):
+                        cls.set("{}_{}".format(name, k), v)
+    return Metrics
+
+
+@pytest.fixture
+def links(request):
+
+    class Links(object):
+        @classmethod
+        def set(cls, name, path):
+
+            if len(request.config.test_logs[request.node.nodeid]) >= MAX_ALLOWED_LINKS_COUNT:
+                raise Exception("Cannot add more than {} links to test".format(MAX_ALLOWED_LINKS_COUNT))
+
+            reserved_names = ["log", "logsdir", "stdout", "stderr"]
+            if name in reserved_names:
+                raise Exception("Attachment name should not belong to the reserved list: {}".format(", ".join(reserved_names)))
+            output_dir = request.config.ya.output_dir
+
+            if not os.path.exists(path):
+                raise Exception("Path to be attached does not exist: {}".format(path))
+
+            if os.path.isabs(path) and ".." in os.path.relpath(path, output_dir):
+                raise Exception("Test attachment must be inside yatest.common.output_path()")
+
+            request.config.test_logs[request.node.nodeid][name] = path
+
+        @classmethod
+        def get(cls, name):
+            if name not in request.config.test_logs[request.node.nodeid]:
+                raise KeyError("Attachment with name '{}' does not exist".format(name))
+            return request.config.test_logs[request.node.nodeid][name]
+
+    return Links
diff --git a/library/python/pytest/plugins/ya.make b/library/python/pytest/plugins/ya.make
new file mode 100644
index 0000000000..c15d6f759d
--- /dev/null
+++ b/library/python/pytest/plugins/ya.make
@@ -0,0 +1,32 @@
+OWNER(g:yatest)
+
+PY23_LIBRARY()
+
+PY_SRCS(
+    ya.py
+    collection.py
+    conftests.py
+    fixtures.py
+)
+
+PEERDIR(
+    library/python/filelock
+    library/python/find_root
+    library/python/testing/filter
+)
+
+IF (PYTHON2)
+    PY_SRCS(
+        fakeid_py2.py
+    )
+
+    PEERDIR(
+        contrib/python/faulthandler
+    )
+ELSE()
+    PY_SRCS(
+        fakeid_py3.py
+    )
+ENDIF()
+
+END()
diff --git a/library/python/pytest/plugins/ya.py b/library/python/pytest/plugins/ya.py
new file mode 100644
index 0000000000..1bde03042d
--- /dev/null
+++ b/library/python/pytest/plugins/ya.py
@@ -0,0 +1,963 @@
+# coding: utf-8
+
+import base64
+import errno
+import re
+import sys
+import os
+import logging
+import fnmatch
+import json
+import time
+import traceback
+import collections
+import signal
+import inspect
+import warnings
+
+import attr
+import faulthandler
+import py
+import pytest
+import six
+
+import _pytest
+import _pytest._io
+import _pytest.mark
+import _pytest.outcomes
+import _pytest.skipping
+
+from _pytest.warning_types import PytestUnhandledCoroutineWarning
+
+from yatest_lib import test_splitter
+
+try:
+    import resource
+except ImportError:
+    resource = None
+
+try:
+    import library.python.pytest.yatest_tools as tools
+except ImportError:
+    # fallback for pytest script mode
+    import yatest_tools as tools
+
+try:
+    from library.python import filelock
+except ImportError:
+    filelock = None
+
+
+import yatest_lib.tools
+
+import yatest_lib.external as canon
+
+import yatest_lib.ya
+
+from library.python.pytest import context
+
+console_logger = logging.getLogger("console")
+yatest_logger = logging.getLogger("ya.test")
+
+
+_pytest.main.EXIT_NOTESTSCOLLECTED = 0
+SHUTDOWN_REQUESTED = False
+
+pytest_config = None
+
+
+def configure_pdb_on_demand():
+    import signal
+
+    if hasattr(signal, "SIGUSR1"):
+        def on_signal(*args):
+            import ipdb
+            ipdb.set_trace()
+
+        signal.signal(signal.SIGUSR1, on_signal)
+
+
+class CustomImporter(object):
+    def __init__(self, roots):
+        self._roots = roots
+
+    def find_module(self, fullname, package_path=None):
+        for path in self._roots:
+            full_path = self._get_module_path(path, fullname)
+
+            if os.path.exists(full_path) and os.path.isdir(full_path) and not os.path.exists(os.path.join(full_path, "__init__.py")):
+                open(os.path.join(full_path, "__init__.py"), "w").close()
+
+        return None
+
+    def _get_module_path(self, path, fullname):
+        return os.path.join(path, *fullname.split('.'))
+
+
+class YaTestLoggingFileHandler(logging.FileHandler):
+    pass
+
+
+class _TokenFilterFormatter(logging.Formatter):
+    def __init__(self, fmt):
+        super(_TokenFilterFormatter, self).__init__(fmt)
+        self._replacements = []
+        if not self._replacements:
+            if six.PY2:
+                for k, v in os.environ.iteritems():
+                    if k.endswith('TOKEN') and v:
+                        self._replacements.append(v)
+            elif six.PY3:
+                for k, v in os.environ.items():
+                    if k.endswith('TOKEN') and v:
+                        self._replacements.append(v)
+            self._replacements = sorted(self._replacements)
+
+    def _filter(self, s):
+        for r in self._replacements:
+            s = s.replace(r, "[SECRET]")
+
+        return s
+
+    def format(self, record):
+        return self._filter(super(_TokenFilterFormatter, self).format(record))
+
+
+def setup_logging(log_path, level=logging.DEBUG, *other_logs):
+    logs = [log_path] + list(other_logs)
+    root_logger = logging.getLogger()
+    for i in range(len(root_logger.handlers) - 1, -1, -1):
+        if isinstance(root_logger.handlers[i], YaTestLoggingFileHandler):
+            root_logger.handlers.pop(i).close()
+    root_logger.setLevel(level)
+    for log_file in logs:
+        file_handler = YaTestLoggingFileHandler(log_file)
+        log_format = '%(asctime)s - %(levelname)s - %(name)s - %(funcName)s: %(message)s'
+        file_handler.setFormatter(_TokenFilterFormatter(log_format))
+        file_handler.setLevel(level)
+        root_logger.addHandler(file_handler)
+
+
+def pytest_addoption(parser):
+    parser.addoption("--build-root", action="store", dest="build_root", default="", help="path to the build root")
+    parser.addoption("--dep-root", action="append", dest="dep_roots", default=[], help="path to the dep build roots")
+    parser.addoption("--source-root", action="store", dest="source_root", default="", help="path to the source root")
+    parser.addoption("--data-root", action="store", dest="data_root", default="", help="path to the arcadia_tests_data root")
+    parser.addoption("--output-dir", action="store", dest="output_dir", default="", help="path to the test output dir")
+    parser.addoption("--python-path", action="store", dest="python_path", default="", help="path the canonical python binary")
+    parser.addoption("--valgrind-path", action="store", dest="valgrind_path", default="", help="path the canonical valgring binary")
+    parser.addoption("--test-filter", action="append", dest="test_filter", default=None, help="test filter")
+    parser.addoption("--test-file-filter", action="store", dest="test_file_filter", default=None, help="test file filter")
+    parser.addoption("--test-param", action="append", dest="test_params", default=None, help="test parameters")
+    parser.addoption("--test-log-level", action="store", dest="test_log_level", choices=["critical", "error", "warning", "info", "debug"], default="debug", help="test log level")
+    parser.addoption("--mode", action="store", choices=[yatest_lib.ya.RunMode.List, yatest_lib.ya.RunMode.Run], dest="mode", default=yatest_lib.ya.RunMode.Run, help="testing mode")
+    parser.addoption("--test-list-file", action="store", dest="test_list_file")
+    parser.addoption("--modulo", default=1, type=int)
+    parser.addoption("--modulo-index", default=0, type=int)
+    parser.addoption("--partition-mode", default='SEQUENTIAL', help="Split tests according to partitoin mode")
+    parser.addoption("--split-by-tests", action='store_true', help="Split test execution by tests instead of suites", default=False)
+    parser.addoption("--project-path", action="store", default="", help="path to CMakeList where test is declared")
+    parser.addoption("--build-type", action="store", default="", help="build type")
+    parser.addoption("--flags", action="append", dest="flags", default=[], help="build flags (-D)")
+    parser.addoption("--sanitize", action="store", default="", help="sanitize mode")
+    parser.addoption("--test-stderr", action="store_true", default=False, help="test stderr")
+    parser.addoption("--test-debug", action="store_true", default=False, help="test debug mode")
+    parser.addoption("--root-dir", action="store", default=None)
+    parser.addoption("--ya-trace", action="store", dest="ya_trace_path", default=None, help="path to ya trace report")
+    parser.addoption("--ya-version", action="store", dest="ya_version", default=0, type=int, help="allows to be compatible with ya and the new changes in ya-dev")
+    parser.addoption(
+        "--test-suffix", action="store", dest="test_suffix", default=None, help="add suffix to every test name"
+    )
+    parser.addoption("--gdb-path", action="store", dest="gdb_path", default="", help="path the canonical gdb binary")
+    parser.addoption("--collect-cores", action="store_true", dest="collect_cores", default=False, help="allows core dump file recovering during test")
+    parser.addoption("--sanitizer-extra-checks", action="store_true", dest="sanitizer_extra_checks", default=False, help="enables extra checks for tests built with sanitizers")
+    parser.addoption("--report-deselected", action="store_true", dest="report_deselected", default=False, help="report deselected tests to the trace file")
+    parser.addoption("--pdb-on-sigusr1", action="store_true", default=False, help="setup pdb.set_trace on SIGUSR1")
+    parser.addoption("--test-tool-bin", help="Path to test_tool")
+    parser.addoption("--test-list-path", dest="test_list_path", action="store", help="path to test list", default="")
+
+
+def from_ya_test():
+    return "YA_TEST_RUNNER" in os.environ
+
+
+def pytest_configure(config):
+    global pytest_config
+    pytest_config = config
+
+    config.option.continue_on_collection_errors = True
+
+    config.addinivalue_line("markers", "ya:external")
+
+    config.from_ya_test = from_ya_test()
+    config.test_logs = collections.defaultdict(dict)
+    config.test_metrics = {}
+    config.suite_metrics = {}
+    config.configure_timestamp = time.time()
+    context = {
+        "project_path": config.option.project_path,
+        "test_stderr": config.option.test_stderr,
+        "test_debug": config.option.test_debug,
+        "build_type": config.option.build_type,
+        "test_traceback": config.option.tbstyle,
+        "flags": config.option.flags,
+        "sanitize": config.option.sanitize,
+    }
+
+    if config.option.collectonly:
+        config.option.mode = yatest_lib.ya.RunMode.List
+
+    config.ya = yatest_lib.ya.Ya(
+        config.option.mode,
+        config.option.source_root,
+        config.option.build_root,
+        config.option.dep_roots,
+        config.option.output_dir,
+        config.option.test_params,
+        context,
+        config.option.python_path,
+        config.option.valgrind_path,
+        config.option.gdb_path,
+        config.option.data_root,
+    )
+    config.option.test_log_level = {
+        "critical": logging.CRITICAL,
+        "error": logging.ERROR,
+        "warning": logging.WARN,
+        "info": logging.INFO,
"debug": logging.DEBUG, + }[config.option.test_log_level] + + if not config.option.collectonly: + setup_logging(os.path.join(config.ya.output_dir, "run.log"), config.option.test_log_level) + config.current_item_nodeid = None + config.current_test_name = None + config.test_cores_count = 0 + config.collect_cores = config.option.collect_cores + config.sanitizer_extra_checks = config.option.sanitizer_extra_checks + try: + config.test_tool_bin = config.option.test_tool_bin + except AttributeError: + logging.info("test_tool_bin not specified") + + if config.sanitizer_extra_checks: + for envvar in ['LSAN_OPTIONS', 'ASAN_OPTIONS']: + if envvar in os.environ: + os.environ.pop(envvar) + if envvar + '_ORIGINAL' in os.environ: + os.environ[envvar] = os.environ[envvar + '_ORIGINAL'] + + if config.option.root_dir: + config.rootdir = py.path.local(config.option.root_dir) + config.invocation_params = attr.evolve(config.invocation_params, dir=config.rootdir) + + extra_sys_path = [] + # Arcadia paths from the test DEPENDS section of ya.make + extra_sys_path.append(os.path.join(config.option.source_root, config.option.project_path)) + # Build root is required for correct import of protobufs, because imports are related to the root + # (like import devtools.dummy_arcadia.protos.lib.my_proto_pb2) + extra_sys_path.append(config.option.build_root) + + for path in config.option.dep_roots: + if os.path.isabs(path): + extra_sys_path.append(path) + else: + extra_sys_path.append(os.path.join(config.option.source_root, path)) + + sys_path_set = set(sys.path) + for path in extra_sys_path: + if path not in sys_path_set: + sys.path.append(path) + sys_path_set.add(path) + + os.environ["PYTHONPATH"] = os.pathsep.join(sys.path) + + if not config.option.collectonly: + if config.option.ya_trace_path: + config.ya_trace_reporter = TraceReportGenerator(config.option.ya_trace_path) + else: + config.ya_trace_reporter = DryTraceReportGenerator(config.option.ya_trace_path) + config.ya_version = config.option.ya_version + + sys.meta_path.append(CustomImporter([config.option.build_root] + [os.path.join(config.option.build_root, dep) for dep in config.option.dep_roots])) + if config.option.pdb_on_sigusr1: + configure_pdb_on_demand() + + # Dump python backtrace in case of any errors + faulthandler.enable() + if hasattr(signal, "SIGQUIT"): + # SIGQUIT is used by test_tool to teardown tests which overruns timeout + faulthandler.register(signal.SIGQUIT, chain=True) + + if hasattr(signal, "SIGUSR2"): + signal.signal(signal.SIGUSR2, _graceful_shutdown) + + +session_should_exit = False + + +def _graceful_shutdown_on_log(should_exit): + if should_exit: + pytest.exit("Graceful shutdown requested") + + +def pytest_runtest_logreport(report): + _graceful_shutdown_on_log(session_should_exit) + + +def pytest_runtest_logstart(nodeid, location): + _graceful_shutdown_on_log(session_should_exit) + + +def pytest_runtest_logfinish(nodeid, location): + _graceful_shutdown_on_log(session_should_exit) + + +def _graceful_shutdown(*args): + global session_should_exit + session_should_exit = True + try: + import library.python.coverage + library.python.coverage.stop_coverage_tracing() + except ImportError: + pass + traceback.print_stack(file=sys.stderr) + capman = pytest_config.pluginmanager.getplugin("capturemanager") + capman.suspend(in_=True) + _graceful_shutdown_on_log(not capman.is_globally_capturing()) + + +def _get_rusage(): + return resource and resource.getrusage(resource.RUSAGE_SELF) + + +def _collect_test_rusage(item): + if resource and hasattr(item, 
"rusage"): + finish_rusage = _get_rusage() + ya_inst = pytest_config.ya + + def add_metric(attr_name, metric_name=None, modifier=None): + if not metric_name: + metric_name = attr_name + if not modifier: + modifier = lambda x: x + if hasattr(item.rusage, attr_name): + ya_inst.set_metric_value(metric_name, modifier(getattr(finish_rusage, attr_name) - getattr(item.rusage, attr_name))) + + for args in [ + ("ru_maxrss", "ru_rss", lambda x: x*1024), # to be the same as in util/system/rusage.cpp + ("ru_utime",), + ("ru_stime",), + ("ru_ixrss", None, lambda x: x*1024), + ("ru_idrss", None, lambda x: x*1024), + ("ru_isrss", None, lambda x: x*1024), + ("ru_majflt", "ru_major_pagefaults"), + ("ru_minflt", "ru_minor_pagefaults"), + ("ru_nswap",), + ("ru_inblock",), + ("ru_oublock",), + ("ru_msgsnd",), + ("ru_msgrcv",), + ("ru_nsignals",), + ("ru_nvcsw",), + ("ru_nivcsw",), + ]: + add_metric(*args) + + +def _get_item_tags(item): + tags = [] + for key, value in item.keywords.items(): + if key == 'pytestmark' and isinstance(value, list): + for mark in value: + tags.append(mark.name) + elif isinstance(value, _pytest.mark.MarkDecorator): + tags.append(key) + return tags + + +def pytest_runtest_setup(item): + item.rusage = _get_rusage() + pytest_config.test_cores_count = 0 + pytest_config.current_item_nodeid = item.nodeid + class_name, test_name = tools.split_node_id(item.nodeid) + test_log_path = tools.get_test_log_file_path(pytest_config.ya.output_dir, class_name, test_name) + setup_logging( + os.path.join(pytest_config.ya.output_dir, "run.log"), + pytest_config.option.test_log_level, + test_log_path + ) + pytest_config.test_logs[item.nodeid]['log'] = test_log_path + pytest_config.test_logs[item.nodeid]['logsdir'] = pytest_config.ya.output_dir + pytest_config.current_test_log_path = test_log_path + pytest_config.current_test_name = "{}::{}".format(class_name, test_name) + separator = "#" * 100 + yatest_logger.info(separator) + yatest_logger.info(test_name) + yatest_logger.info(separator) + yatest_logger.info("Test setup") + + test_item = CrashedTestItem(item.nodeid, pytest_config.option.test_suffix) + pytest_config.ya_trace_reporter.on_start_test_class(test_item) + pytest_config.ya_trace_reporter.on_start_test_case(test_item) + + +def pytest_runtest_teardown(item, nextitem): + yatest_logger.info("Test teardown") + + +def pytest_runtest_call(item): + class_name, test_name = tools.split_node_id(item.nodeid) + yatest_logger.info("Test call (class_name: %s, test_name: %s)", class_name, test_name) + + +def pytest_deselected(items): + config = pytest_config + if config.option.report_deselected: + for item in items: + deselected_item = DeselectedTestItem(item.nodeid, config.option.test_suffix) + config.ya_trace_reporter.on_start_test_class(deselected_item) + config.ya_trace_reporter.on_finish_test_case(deselected_item) + config.ya_trace_reporter.on_finish_test_class(deselected_item) + + +@pytest.mark.trylast +def pytest_collection_modifyitems(items, config): + + def filter_items(filters): + filtered_items = [] + deselected_items = [] + for item in items: + canonical_node_id = str(CustomTestItem(item.nodeid, pytest_config.option.test_suffix)) + matched = False + for flt in filters: + if "::" not in flt and "*" not in flt: + flt += "*" # add support for filtering by module name + if canonical_node_id.endswith(flt) or fnmatch.fnmatch(tools.escape_for_fnmatch(canonical_node_id), tools.escape_for_fnmatch(flt)): + matched = True + if matched: + filtered_items.append(item) + else: + deselected_items.append(item) + + 
+        config.hook.pytest_deselected(items=deselected_items)
+        items[:] = filtered_items
+
+    def filter_by_full_name(filters):
+        filter_set = {flt for flt in filters}
+        filtered_items = []
+        deselected_items = []
+        for item in items:
+            if item.nodeid in filter_set:
+                filtered_items.append(item)
+            else:
+                deselected_items.append(item)
+
+        config.hook.pytest_deselected(items=deselected_items)
+        items[:] = filtered_items
+
+    # XXX - check to be removed when tests for peerdirs don't run
+    for item in items:
+        if not item.nodeid:
+            item._nodeid = os.path.basename(item.location[0])
+    if os.path.exists(config.option.test_list_path):
+        with open(config.option.test_list_path, 'r') as afile:
+            chunks = json.load(afile)
+            filters = chunks[config.option.modulo_index]
+            filter_by_full_name(filters)
+    else:
+        if config.option.test_filter:
+            filter_items(config.option.test_filter)
+        partition_mode = config.option.partition_mode
+        modulo = config.option.modulo
+        if modulo > 1:
+            items[:] = sorted(items, key=lambda item: item.nodeid)
+            modulo_index = config.option.modulo_index
+            split_by_tests = config.option.split_by_tests
+            items_by_classes = {}
+            res = []
+            for item in items:
+                if item.nodeid.count("::") == 2 and not split_by_tests:
+                    class_name = item.nodeid.rsplit("::", 1)[0]
+                    if class_name not in items_by_classes:
+                        items_by_classes[class_name] = []
+                        res.append(items_by_classes[class_name])
+                    items_by_classes[class_name].append(item)
+                else:
+                    res.append([item])
+            chunk_items = test_splitter.get_splitted_tests(res, modulo, modulo_index, partition_mode, is_sorted=True)
+            items[:] = []
+            for item in chunk_items:
+                items.extend(item)
+            yatest_logger.info("Modulo %s tests are: %s", modulo_index, chunk_items)
+
+    if config.option.mode == yatest_lib.ya.RunMode.Run:
+        for item in items:
+            test_item = NotLaunchedTestItem(item.nodeid, config.option.test_suffix)
+            config.ya_trace_reporter.on_start_test_class(test_item)
+            config.ya_trace_reporter.on_finish_test_case(test_item)
+            config.ya_trace_reporter.on_finish_test_class(test_item)
+    elif config.option.mode == yatest_lib.ya.RunMode.List:
+        tests = []
+        for item in items:
+            item = CustomTestItem(item.nodeid, pytest_config.option.test_suffix, item.keywords)
+            record = {
+                "class": item.class_name,
+                "test": item.test_name,
+                "tags": _get_item_tags(item),
+            }
+            tests.append(record)
+        if config.option.test_list_file:
+            with open(config.option.test_list_file, 'w') as afile:
+                json.dump(tests, afile)
+        # TODO prettyboy remove after test_tool release - currently it's required for backward compatibility
+        sys.stderr.write(json.dumps(tests))
+
+
+def pytest_collectreport(report):
+    if not report.passed:
+        if hasattr(pytest_config, 'ya_trace_reporter'):
+            test_item = TestItem(report, None, pytest_config.option.test_suffix)
+            pytest_config.ya_trace_reporter.on_error(test_item)
+        else:
+            sys.stderr.write(yatest_lib.tools.to_utf8(report.longrepr))
+
+
+@pytest.mark.tryfirst
+def pytest_pyfunc_call(pyfuncitem):
+    testfunction = pyfuncitem.obj
+    iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
+    if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
+        msg = "Coroutine functions are not natively supported and have been skipped.\n"
+        msg += "You need to install a suitable plugin for your async framework, for example:\n"
+        msg += " - pytest-asyncio\n"
+        msg += " - pytest-trio\n"
+        msg += " - pytest-tornasync"
+        warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid)))
+        _pytest.outcomes.skip(msg="coroutine function and no async plugin installed (see warnings)")
+    funcargs = pyfuncitem.funcargs
+    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
+    pyfuncitem.retval = testfunction(**testargs)
+    return True
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    def logreport(report, result, call):
+        test_item = TestItem(report, result, pytest_config.option.test_suffix)
+        if not pytest_config.suite_metrics and context.Ctx.get("YA_PYTEST_START_TIMESTAMP"):
+            pytest_config.suite_metrics["pytest_startup_duration"] = call.start - context.Ctx["YA_PYTEST_START_TIMESTAMP"]
+            pytest_config.ya_trace_reporter.dump_suite_metrics()
+
+        pytest_config.ya_trace_reporter.on_log_report(test_item)
+
+        if report.outcome == "failed":
+            yatest_logger.error(report.longrepr)
+
+        if report.when == "call":
+            _collect_test_rusage(item)
+            pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+        elif report.when == "setup":
+            pytest_config.ya_trace_reporter.on_start_test_class(test_item)
+            if report.outcome != "passed":
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+            else:
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+        elif report.when == "teardown":
+            if report.outcome == "failed":
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+            else:
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item, duration_only=True)
+            pytest_config.ya_trace_reporter.on_finish_test_class(test_item)
+
+    outcome = yield
+    rep = outcome.get_result()
+    result = None
+    if hasattr(item, 'retval') and item.retval is not None:
+        result = item.retval
+        if not pytest_config.from_ya_test:
+            ti = TestItem(rep, result, pytest_config.option.test_suffix)
+            tr = pytest_config.pluginmanager.getplugin('terminalreporter')
+            tr.write_line("{} - Validating canonical data is not supported when running standalone binary".format(ti), yellow=True, bold=True)
+    logreport(rep, result, call)
+
+
+def pytest_make_parametrize_id(config, val, argname):
+    # Avoid <, > symbols in canondata file names
+    if inspect.isfunction(val) and val.__name__ == "<lambda>":
+        return str(argname)
+    return None
+
+
+def get_formatted_error(report):
+    if isinstance(report.longrepr, tuple):
+        text = ""
+        for entry in report.longrepr:
+            text += colorize(entry)
+    else:
+        text = colorize(report.longrepr)
+    text = yatest_lib.tools.to_utf8(text)
+    return text
+
+
+def colorize(longrepr):
+    # use default pytest colorization
+    if pytest_config.option.tbstyle != "short":
+        io = py.io.TextIO()
+        if six.PY2:
+            writer = py.io.TerminalWriter(file=io)
+        else:
+            writer = _pytest._io.TerminalWriter(file=io)
+        # enable colorization
+        writer.hasmarkup = True
+
+        if hasattr(longrepr, 'reprtraceback') and hasattr(longrepr.reprtraceback, 'toterminal'):
+            longrepr.reprtraceback.toterminal(writer)
+            return io.getvalue().strip()
+        return yatest_lib.tools.to_utf8(longrepr)
+
+    text = yatest_lib.tools.to_utf8(longrepr)
+    pos = text.find("E ")
+    if pos == -1:
+        return text
+
+    bt, error = text[:pos], text[pos:]
+    filters = [
+        # File path, line number and function name
+        (re.compile(r"^(.*?):(\d+): in (\S+)", flags=re.MULTILINE), r"[[unimp]]\1[[rst]]:[[alt2]]\2[[rst]]: in [[alt1]]\3[[rst]]"),
+    ]
+    for regex, substitution in filters:
+        bt = regex.sub(substitution, bt)
+    return "{}[[bad]]{}".format(bt, error)
+
+
+class TestItem(object):
+
+    def __init__(self, report, result, test_suffix):
+        self._result = result
+        self.nodeid = report.nodeid
+        self._class_name, self._test_name = tools.split_node_id(self.nodeid, test_suffix)
+        self._error = None
+        self._status = None
+        self._process_report(report)
+        self._duration = hasattr(report, 'duration') and report.duration or 0
+        self._keywords = getattr(report, "keywords", {})
+
+    def _process_report(self, report):
+        if report.longrepr:
+            self.set_error(report)
+            if hasattr(report, 'when') and report.when != "call":
+                self.set_error(report.when + " failed:\n" + self._error)
+        else:
+            self.set_error("")
+
+        report_teststatus = _pytest.skipping.pytest_report_teststatus(report)
+        if report_teststatus is not None:
+            report_teststatus = report_teststatus[0]
+
+        if report_teststatus == 'xfailed':
+            self._status = 'xfail'
+            self.set_error(report.wasxfail, 'imp')
+        elif report_teststatus == 'xpassed':
+            self._status = 'xpass'
+            self.set_error("Test unexpectedly passed")
+        elif report.skipped:
+            self._status = 'skipped'
+            self.set_error(yatest_lib.tools.to_utf8(report.longrepr[-1]))
+        elif report.passed:
+            self._status = 'good'
+            self.set_error("")
+        else:
+            self._status = 'fail'
+
+    @property
+    def status(self):
+        return self._status
+
+    def set_status(self, status):
+        self._status = status
+
+    @property
+    def test_name(self):
+        return tools.normalize_name(self._test_name)
+
+    @property
+    def class_name(self):
+        return tools.normalize_name(self._class_name)
+
+    @property
+    def error(self):
+        return self._error
+
+    def set_error(self, entry, marker='bad'):
+        if isinstance(entry, _pytest.reports.BaseReport):
+            self._error = get_formatted_error(entry)
+        else:
+            self._error = "[[{}]]{}".format(yatest_lib.tools.to_str(marker), yatest_lib.tools.to_str(entry))
+
+    @property
+    def duration(self):
+        return self._duration
+
+    @property
+    def result(self):
+        if 'not_canonize' in self._keywords:
+            return None
+        return self._result
+
+    @property
+    def keywords(self):
+        return self._keywords
+
+    def __str__(self):
+        return "{}::{}".format(self.class_name, self.test_name)
+
+
+class CustomTestItem(TestItem):
+
+    def __init__(self, nodeid, test_suffix, keywords=None):
+        self._result = None
+        self.nodeid = nodeid
+        self._class_name, self._test_name = tools.split_node_id(nodeid, test_suffix)
+        self._duration = 0
+        self._error = ""
+        self._keywords = keywords if keywords is not None else {}
+
+
+class NotLaunchedTestItem(CustomTestItem):
+
+    def __init__(self, nodeid, test_suffix):
+        super(NotLaunchedTestItem, self).__init__(nodeid, test_suffix)
+        self._status = "not_launched"
+
+
+class CrashedTestItem(CustomTestItem):
+
+    def __init__(self, nodeid, test_suffix):
+        super(CrashedTestItem, self).__init__(nodeid, test_suffix)
+        self._status = "crashed"
+
+
+class DeselectedTestItem(CustomTestItem):
+
+    def __init__(self, nodeid, test_suffix):
+        super(DeselectedTestItem, self).__init__(nodeid, test_suffix)
+        self._status = "deselected"
+
+
+class TraceReportGenerator(object):
+
+    def __init__(self, out_file_path):
+        self._filename = out_file_path
+        self._file = open(out_file_path, 'w')
+        self._wreckage_filename = out_file_path + '.wreckage'
+        self._test_messages = {}
+        self._test_duration = {}
+        # Some machinery to avoid data corruption due sloppy fork()
+        self._current_test = (None, None)
+        self._pid = os.getpid()
+        self._check_intricate_respawn()
+
+    def _check_intricate_respawn(self):
+        pid_file = self._filename + '.pid'
+        try:
+            # python2 doesn't support open(f, 'x')
+            afile = os.fdopen(os.open(pid_file, os.O_WRONLY | os.O_EXCL | os.O_CREAT), 'w')
+            afile.write(str(self._pid))
+            afile.close()
+            return
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+        # Looks like the test binary was respawned
+        if from_ya_test():
+            try:
+                with open(pid_file) as afile:
+                    prev_pid = afile.read()
+            except Exception as e:
+                prev_pid = '(failed to obtain previous pid: {})'.format(e)
+
+            parts = [
+                "Aborting test run: test machinery found that the test binary {} has already been run before.".format(sys.executable),
+                "Looks like test has incorrect respawn/relaunch logic within test binary.",
+                "Test should not try to restart itself - this is a poorly designed test case that leads to errors and could corrupt internal test machinery files.",
+                "Debug info: previous pid:{} current:{}".format(prev_pid, self._pid),
+            ]
+            msg = '\n'.join(parts)
+            yatest_logger.error(msg)
+
+            if filelock:
+                lock = filelock.FileLock(self._wreckage_filename + '.lock')
+                lock.acquire()
+
+            with open(self._wreckage_filename, 'a') as afile:
+                self._file = afile
+
+                self._dump_trace('chunk_event', {"errors": [('fail', '[[bad]]' + msg)]})
+
+            raise Exception(msg)
+        else:
+            # Test binary is launched without `ya make -t`'s testing machinery - don't rely on clean environment
+            pass
+
+    def on_start_test_class(self, test_item):
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        class_name = test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name
+        self._current_test = (class_name, None)
+        self.trace('test-started', {'class': class_name})
+
+    def on_finish_test_class(self, test_item):
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        self.trace('test-finished', {'class': test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name})
+
+    def on_start_test_case(self, test_item):
+        class_name = yatest_lib.tools.to_utf8(test_item.class_name)
+        subtest_name = yatest_lib.tools.to_utf8(test_item.test_name)
+        message = {
+            'class': class_name,
+            'subtest': subtest_name,
+        }
+        if test_item.nodeid in pytest_config.test_logs:
+            message['logs'] = pytest_config.test_logs[test_item.nodeid]
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        self._current_test = (class_name, subtest_name)
+        self.trace('subtest-started', message)
+
+    def on_finish_test_case(self, test_item, duration_only=False):
+        if test_item.result is not None:
+            try:
+                result = canon.serialize(test_item.result)
+            except Exception as e:
+                yatest_logger.exception("Error while serializing test results")
+                test_item.set_error("Invalid test result: {}".format(e))
+                test_item.set_status("fail")
+                result = None
+        else:
+            result = None
+
+        if duration_only and test_item.nodeid in self._test_messages:  # add teardown time
+            message = self._test_messages[test_item.nodeid]
+        else:
+            comment = self._test_messages[test_item.nodeid]['comment'] if test_item.nodeid in self._test_messages else ''
+            comment += self._get_comment(test_item)
+            message = {
+                'class': yatest_lib.tools.to_utf8(test_item.class_name),
+                'subtest': yatest_lib.tools.to_utf8(test_item.test_name),
+                'status': test_item.status,
+                'comment': comment,
+                'result': result,
+                'metrics': pytest_config.test_metrics.get(test_item.nodeid),
+                'is_diff_test': 'diff_test' in test_item.keywords,
+                'tags': _get_item_tags(test_item),
+            }
+            if test_item.nodeid in pytest_config.test_logs:
+                message['logs'] = pytest_config.test_logs[test_item.nodeid]
+
+        message['time'] = self._test_duration.get(test_item.nodeid, test_item.duration)
+
+        self.trace('subtest-finished', message)
+        self._test_messages[test_item.nodeid] = message
+
+    def dump_suite_metrics(self):
+        message = {"metrics": pytest_config.suite_metrics}
+        self.trace("suite-event", message)
+
+    def on_error(self, test_item):
+        self.trace('chunk_event', {"errors": [(test_item.status, self._get_comment(test_item))]})
+
+    def on_log_report(self, test_item):
+        if test_item.nodeid in self._test_duration:
+            self._test_duration[test_item.nodeid] += test_item._duration
+        else:
+            self._test_duration[test_item.nodeid] = test_item._duration
+
+    @staticmethod
+    def _get_comment(test_item):
+        msg = yatest_lib.tools.to_utf8(test_item.error)
+        if not msg:
+            return ""
+        return msg + "[[rst]]"
+
+    def _dump_trace(self, name, value):
+        event = {
+            'timestamp': time.time(),
+            'value': value,
+            'name': name
+        }
+
+        data = yatest_lib.tools.to_str(json.dumps(event, ensure_ascii=False))
+        self._file.write(data + '\n')
+        self._file.flush()
+
+    def _check_sloppy_fork(self, name, value):
+        if self._pid == os.getpid():
+            return
+
+        yatest_logger.error("Skip tracing to avoid data corruption, name = %s, value = %s", name, value)
+
+        try:
+            # Lock wreckage tracefile to avoid race if multiple tests use fork sloppily
+            if filelock:
+                lock = filelock.FileLock(self._wreckage_filename + '.lock')
+                lock.acquire()
+
+            with open(self._wreckage_filename, 'a') as afile:
+                self._file = afile
+
+                parts = [
+                    "It looks like you have leaked process - it could corrupt internal test machinery files.",
+                    "Usually it happens when you casually use fork() without os._exit(),",
+                    "which results in two pytest processes running at the same time.",
+                    "Pid of the original pytest's process is {}, however current process has {} pid.".format(self._pid, os.getpid()),
+                ]
+                if self._current_test[1]:
+                    parts.append("Most likely the problem is in '{}' test.".format(self._current_test))
+                else:
+                    parts.append("Most likely new process was created before any test was launched (during the import stage?).")
+
+                if value.get('comment'):
+                    comment = value.get('comment', '').strip()
+                    # multiline comment
+                    newline_required = '\n' if '\n' in comment else ''
+                    parts.append("Debug info: name = '{}' comment:{}{}".format(name, newline_required, comment))
+                else:
+                    val_str = json.dumps(value, ensure_ascii=False).encode('utf-8')
+                    parts.append("Debug info: name = '{}' value = '{}'".format(name, base64.b64encode(val_str)))
+
+                msg = "[[bad]]{}".format('\n'.join(parts))
+                class_name, subtest_name = self._current_test
+                if subtest_name:
+                    data = {
+                        'class': class_name,
+                        'subtest': subtest_name,
+                        'status': 'fail',
+                        'comment': msg,
+                    }
+                    # overwrite original status
+                    self._dump_trace('subtest-finished', data)
+                else:
+                    self._dump_trace('chunk_event', {"errors": [('fail', msg)]})
+        except Exception as e:
+            yatest_logger.exception(e)
+        finally:
+            os._exit(38)
+
+    def trace(self, name, value):
+        self._check_sloppy_fork(name, value)
+        self._dump_trace(name, value)
+
+
+class DryTraceReportGenerator(TraceReportGenerator):
+    """
+    Generator does not write any information.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self._test_messages = {}
+        self._test_duration = {}
+
+    def trace(self, name, value):
+        pass
diff --git a/library/python/pytest/pytest.yatest.ini b/library/python/pytest/pytest.yatest.ini
new file mode 100644
index 0000000000..70d6c98516
--- /dev/null
+++ b/library/python/pytest/pytest.yatest.ini
@@ -0,0 +1,7 @@
+[pytest]
+pep8maxlinelength = 200
+norecursedirs = *
+pep8ignore = E127 E123 E226 E24
+filterwarnings =
+    ignore::pytest.RemovedInPytest4Warning
+addopts = -p no:warnings
diff --git a/library/python/pytest/rewrite.py b/library/python/pytest/rewrite.py
new file mode 100644
index 0000000000..ec188d847f
--- /dev/null
+++ b/library/python/pytest/rewrite.py
@@ -0,0 +1,123 @@
+from __future__ import absolute_import
+from __future__ import print_function
+
+import ast
+
+import py
+
+from _pytest.assertion import rewrite
+try:
+    import importlib.util
+except ImportError:
+    pass
+from __res import importer
+import sys
+import six
+
+
+def _get_state(config):
+    if hasattr(config, '_assertstate'):
+        return config._assertstate
+    return config._store[rewrite.assertstate_key]
+
+
+class AssertionRewritingHook(rewrite.AssertionRewritingHook):
+    def __init__(self, *args, **kwargs):
+        self.modules = {}
+        super(AssertionRewritingHook, self).__init__(*args, **kwargs)
+
+    def find_module(self, name, path=None):
+        co = self._find_module(name, path)
+        if co is not None:
+            return self
+
+    def _find_module(self, name, path=None):
+        state = _get_state(self.config)
+        if not self._should_rewrite(name, None, state):
+            return None
+        state.trace("find_module called for: %s" % name)
+
+        try:
+            if self.is_package(name):
+                return None
+        except ImportError:
+            return None
+
+        self._rewritten_names.add(name)
+
+        state.trace("rewriting %s" % name)
+        co = _rewrite_test(self.config, name)
+        if co is None:
+            # Probably a SyntaxError in the test.
+            return None
+        self.modules[name] = co, None
+        return co
+
+    def find_spec(self, name, path=None, target=None):
+        co = self._find_module(name, path)
+        if co is not None:
+            return importlib.util.spec_from_file_location(
+                name,
+                co.co_filename,
+                loader=self,
+            )
+
+    def _should_rewrite(self, name, fn, state):
+        if name.startswith("__tests__.") or name.endswith(".conftest"):
+            return True
+
+        return self._is_marked_for_rewrite(name, state)
+
+    def is_package(self, name):
+        return importer.is_package(name)
+
+    def get_source(self, name):
+        return importer.get_source(name)
+
+    if six.PY3:
+        def load_module(self, module):
+            co, _ = self.modules.pop(module.__name__)
+            try:
+                module.__file__ = co.co_filename
+                module.__cached__ = None
+                module.__loader__ = self
+                module.__spec__ = importlib.util.spec_from_file_location(module.__name__, co.co_filename, loader=self)
+                exec(co, module.__dict__)
+            except:  # noqa
+                if module.__name__ in sys.modules:
+                    del sys.modules[module.__name__]
+                raise
+            return sys.modules[module.__name__]
+
+        def exec_module(self, module):
+            if module.__name__ in self.modules:
+                self.load_module(module)
+            else:
+                super(AssertionRewritingHook, self).exec_module(module)
+
+
+def _rewrite_test(config, name):
+    """Try to read and rewrite *fn* and return the code object."""
+    state = _get_state(config)
+
+    source = importer.get_source(name)
+    if source is None:
+        return None
+
+    path = importer.get_filename(name)
+
+    try:
+        tree = ast.parse(source, filename=path)
+    except SyntaxError:
+        # Let this pop up again in the real import.
+        state.trace("failed to parse: %r" % (path,))
+        return None
+    rewrite.rewrite_asserts(tree, py.path.local(path), config)
+    try:
+        co = compile(tree, path, "exec", dont_inherit=True)
+    except SyntaxError:
+        # It's possible that this error is from some bug in the
+        # assertion rewriting, but I don't know of a fast way to tell.
+        state.trace("failed to compile: %r" % (path,))
+        return None
+    return co
diff --git a/library/python/pytest/ya.make b/library/python/pytest/ya.make
new file mode 100644
index 0000000000..060c92c313
--- /dev/null
+++ b/library/python/pytest/ya.make
@@ -0,0 +1,32 @@
+PY23_LIBRARY()
+
+OWNER(
+    g:yatool
+    dmitko
+)
+
+PY_SRCS(
+    __init__.py
+    main.py
+    rewrite.py
+    yatest_tools.py
+    context.py
+)
+
+PEERDIR(
+    contrib/python/dateutil
+    contrib/python/ipdb
+    contrib/python/py
+    contrib/python/pytest
+    contrib/python/requests
+    library/python/pytest/plugins
+    library/python/testing/yatest_common
+    library/python/testing/yatest_lib
+)
+
+RESOURCE_FILES(
+    PREFIX library/python/pytest/
+    pytest.yatest.ini
+)
+
+END()
diff --git a/library/python/pytest/yatest_tools.py b/library/python/pytest/yatest_tools.py
new file mode 100644
index 0000000000..6b8b896394
--- /dev/null
+++ b/library/python/pytest/yatest_tools.py
@@ -0,0 +1,304 @@
+# coding: utf-8
+
+import collections
+import functools
+import math
+import os
+import re
+import sys
+
+import yatest_lib.tools
+
+
+class Subtest(object):
+    def __init__(self, name, test_name, status, comment, elapsed, result=None, test_type=None, logs=None, cwd=None, metrics=None):
+        self._name = name
+        self._test_name = test_name
+        self.status = status
+        self.elapsed = elapsed
+        self.comment = comment
+        self.result = result
+        self.test_type = test_type
+        self.logs = logs or {}
+        self.cwd = cwd
+        self.metrics = metrics
+
+    def __eq__(self, other):
+        if not isinstance(other, Subtest):
+            return False
+        return self.name == other.name and self.test_name == other.test_name
+
+    def __str__(self):
+        return yatest_lib.tools.to_utf8(unicode(self))
+
+    def __unicode__(self):
+        return u"{}::{}".format(self.test_name, self.test_name)
+
+    @property
+    def name(self):
+        return yatest_lib.tools.to_utf8(self._name)
+
+    @property
+    def test_name(self):
+        return yatest_lib.tools.to_utf8(self._test_name)
+
+    def __repr__(self):
+        return "Subtest [{}::{} - {}[{}]: {}]".format(self.name, self.test_name, self.status, self.elapsed, self.comment)
+
+    def __hash__(self):
+        return hash(str(self))
+
+
+class SubtestInfo(object):
+
+    skipped_prefix = '[SKIPPED] '
+
+    @classmethod
+    def from_str(cls, s):
+        if s.startswith(SubtestInfo.skipped_prefix):
+            s = s[len(SubtestInfo.skipped_prefix):]
+            skipped = True
+
+        else:
+            skipped = False
+
+        return SubtestInfo(*s.rsplit(TEST_SUBTEST_SEPARATOR, 1), skipped=skipped)
+
+    def __init__(self, test, subtest="", skipped=False, **kwargs):
+        self.test = test
+        self.subtest = subtest
+        self.skipped = skipped
+        for key, value in kwargs.iteritems():
+            setattr(self, key, value)
+
+    def __str__(self):
+        s = ''
+
+        if self.skipped:
+            s += SubtestInfo.skipped_prefix
+
+        return s + TEST_SUBTEST_SEPARATOR.join([self.test, self.subtest])
+
+    def __repr__(self):
+        return str(self)
+
+
+class Status(object):
+    GOOD, XFAIL, FAIL, XPASS, MISSING, CRASHED, TIMEOUT = range(7)
+    SKIPPED = -100
+    NOT_LAUNCHED = -200
+    CANON_DIFF = -300
+    FLAKY = -1
+    BY_NAME = {'good': GOOD, 'fail': FAIL, 'xfail': XFAIL, 'xpass': XPASS, 'missing': MISSING, 'crashed': CRASHED,
+               'skipped': SKIPPED, 'flaky': FLAKY, 'not_launched': NOT_LAUNCHED, 'timeout': TIMEOUT, 'diff': CANON_DIFF}
+    TO_STR = {GOOD: 'good', FAIL: 'fail', XFAIL: 'xfail', XPASS: 'xpass', MISSING: 'missing', CRASHED: 'crashed',
+              SKIPPED: 'skipped', FLAKY: 'flaky', NOT_LAUNCHED: 'not_launched', TIMEOUT: 'timeout', CANON_DIFF: 'diff'}
+
+
+class Test(object):
+    def __init__(self, name, path, status=None, comment=None, subtests=None):
+        self.name = name
+        self.path = path
+        self.status = status
+        self.comment = comment
+        self.subtests = subtests or []
+
+    def __eq__(self, other):
+        if not isinstance(other, Test):
+            return False
+        return self.name == other.name and self.path == other.path
+
+    def __str__(self):
+        return "Test [{} {}] - {} - {}".format(self.name, self.path, self.status, self.comment)
+
+    def __repr__(self):
+        return str(self)
+
+    def add_subtest(self, subtest):
+        self.subtests.append(subtest)
+
+    def setup_status(self, status, comment):
+        self.status = Status.BY_NAME[status or 'good']
+        if len(self.subtests) != 0:
+            self.status = max(self.status, max(s.status for s in self.subtests))
+        self.comment = comment
+
+    def subtests_by_status(self, status):
+        return [x.status for x in self.subtests].count(status)
+
+
+class NoMd5FileException(Exception):
+    pass
+
+
+TEST_SUBTEST_SEPARATOR = '::'
+
+
+# TODO: extract color theme logic from ya
+COLOR_THEME = {
+    'test_name': 'light-blue',
+    'test_project_path': 'dark-blue',
+    'test_dir_desc': 'dark-magenta',
+    'test_binary_path': 'light-gray',
+}
+
+
+# XXX: remove me
+class YaCtx(object):
+    pass
+
+ya_ctx = YaCtx()
+
+TRACE_FILE_NAME = "ytest.report.trace"
+
+
+def lazy(func):
+    mem = {}
+
+    @functools.wraps(func)
+    def wrapper():
+        if "results" not in mem:
+            mem["results"] = func()
+        return mem["results"]
+
+    return wrapper
+
+
+@lazy
+def _get_mtab():
+    if os.path.exists("/etc/mtab"):
+        with open("/etc/mtab") as afile:
+            data = afile.read()
+        return [line.split(" ") for line in data.split("\n") if line]
+    return []
+
+
+def get_max_filename_length(dirname):
+    """
+    Return maximum filename length for the filesystem
+    :return:
+    """
+    if sys.platform.startswith("linux"):
+        # Linux user's may work on mounted ecryptfs filesystem
+        # which has filename length limitations
+        for entry in _get_mtab():
+            mounted_dir, filesystem = entry[1], entry[2]
+            # http://unix.stackexchange.com/questions/32795/what-is-the-maximum-allowed-filename-and-folder-size-with-ecryptfs
+            if filesystem == "ecryptfs" and dirname and dirname.startswith(mounted_dir):
+                return 140
+    # default maximum filename length for most filesystems
+    return 255
+
+
+def get_unique_file_path(dir_path, filename, cache=collections.defaultdict(set)):
+    """
+    Get unique filename in dir with proper filename length, using given filename/dir.
+    File/dir won't be created (thread nonsafe)
+    :param dir_path: path to dir
+    :param filename: original filename
+    :return: unique filename
+    """
+    max_suffix = 10000
+    # + 1 symbol for dot before suffix
+    tail_length = int(round(math.log(max_suffix, 10))) + 1
+    # truncate filename length in accordance with filesystem limitations
+    filename, extension = os.path.splitext(filename)
+    # XXX
+    if sys.platform.startswith("win"):
+        # Trying to fit into MAX_PATH if it's possible.
+        # Remove after DEVTOOLS-1646
+        max_path = 260
+        filename_len = len(dir_path) + len(extension) + tail_length + len(os.sep)
+        if filename_len < max_path:
+            filename = yatest_lib.tools.trim_string(filename, max_path - filename_len)
+    filename = yatest_lib.tools.trim_string(filename, get_max_filename_length(dir_path) - tail_length - len(extension)) + extension
+    candidate = os.path.join(dir_path, filename)
+
+    key = dir_path + filename
+    counter = sorted(cache.get(key, {0, }))[-1]
+    while os.path.exists(candidate):
+        cache[key].add(counter)
+        counter += 1
+        assert counter < max_suffix
+        candidate = os.path.join(dir_path, filename + ".{}".format(counter))
+    return candidate
+
+
+def escape_for_fnmatch(s):
+    return s.replace("[", "[").replace("]", "]")
+
+
+def get_python_cmd(opts=None, use_huge=True, suite=None):
+    if opts and getattr(opts, 'flags', {}).get("USE_ARCADIA_PYTHON") == "no":
+        return ["python"]
+    if suite and not suite._use_arcadia_python:
+        return ["python"]
+    if use_huge:
+        return ["$(PYTHON)/python"]
+    ymake_path = opts.ymake_bin if opts and getattr(opts, 'ymake_bin', None) else "$(YMAKE)/ymake"
+    return [ymake_path, "--python"]
+
+
+def normalize_name(name):
+    replacements = [
+        ("\\", "\\\\"),
+        ("\n", "\\n"),
+        ("\t", "\\t"),
+        ("\r", "\\r"),
+    ]
+    for l, r in replacements:
+        name = name.replace(l, r)
+    return name
+
+
+def normalize_filename(filename):
+    """
+    Replace invalid for file names characters with string equivalents
+    :param some_string: string to be converted to a valid file name
+    :return: valid file name
+    """
+    not_allowed_pattern = r"[\[\]\/:*?\"\'<>|+\0\\\s\x0b\x0c]"
+    filename = re.sub(not_allowed_pattern, ".", filename)
+    return re.sub(r"\.{2,}", ".", filename)
+
+
+def get_test_log_file_path(output_dir, class_name, test_name, extension="log"):
+    """
+    get test log file path, platform dependant
+    :param output_dir: dir where log file should be placed
+    :param class_name: test class name
+    :param test_name: test name
+    :return: test log file name
+    """
+    if os.name == "nt":
+        # don't add class name to the log's filename
+        # to reduce it's length on windows
+        filename = test_name
+    else:
+        filename = "{}.{}".format(class_name, test_name)
+    if not filename:
+        filename = "test"
+    filename += "." + extension
+    filename = normalize_filename(filename)
+    return get_unique_file_path(output_dir, filename)
+
+
+def split_node_id(nodeid, test_suffix=None):
+    path, possible_open_bracket, params = nodeid.partition('[')
+    separator = "::"
+    if separator in path:
+        path, test_name = path.split(separator, 1)
+    else:
+        test_name = os.path.basename(path)
+    if test_suffix:
+        test_name += "::" + test_suffix
+    class_name = os.path.basename(path.strip())
+    if separator in test_name:
+        klass_name, test_name = test_name.split(separator, 1)
+        if not test_suffix:
+            # test suffix is used for flakes and pep8, no need to add class_name as it's === class_name
+            class_name += separator + klass_name
+    if separator in test_name:
+        test_name = test_name.split(separator)[-1]
+    test_name += possible_open_bracket + params
+    return yatest_lib.tools.to_utf8(class_name), yatest_lib.tools.to_utf8(test_name)
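Editor's illustration (not part of the commit): a minimal sketch of how a test running under this wrapper might consume the metrics and links fixtures defined in plugins/fixtures.py above. The test module, metric name, and file contents are hypothetical; yatest.common.output_path is assumed from library/python/testing/yatest_common, which the diff's error message references.

    # hypothetical_test.py - runs under the pytest wrapper from this commit
    import yatest.common  # assumed API from library/python/testing/yatest_common


    def test_sort_benchmark(metrics, links):
        # Metrics.set() asserts the name is at most 128 chars and the value is int/float;
        # the value lands in config.test_metrics and is reported via the trace file.
        metrics.set("sort_10k_elapsed_seconds", 0.042)

        # Links.set() rejects reserved names ("log", "logsdir", "stdout", "stderr"),
        # caps attachments at MAX_ALLOWED_LINKS_COUNT, and requires the file to exist
        # inside the test output directory.
        report = yatest.common.output_path("report.txt")
        with open(report, "w") as afile:
            afile.write("sorted 10k integers\n")
        links.set("report", report)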