author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:30 +0300 |
---|---|---|
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:30 +0300 |
commit | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch) | |
tree | 012bb94d777798f1f56ac1cec429509766d05181 /library/python/pytest/plugins/ya.py | |
parent | 6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff) | |
download | ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz | |
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'library/python/pytest/plugins/ya.py')
-rw-r--r-- | library/python/pytest/plugins/ya.py | 172 |
1 file changed, 86 insertions, 86 deletions
```diff
diff --git a/library/python/pytest/plugins/ya.py b/library/python/pytest/plugins/ya.py
index 1bde03042d..e34f618fc5 100644
--- a/library/python/pytest/plugins/ya.py
+++ b/library/python/pytest/plugins/ya.py
@@ -11,23 +11,23 @@ import json
 import time
 import traceback
 import collections
-import signal
-import inspect
-import warnings
-
-import attr
-import faulthandler
+import signal
+import inspect
+import warnings
+
+import attr
+import faulthandler
 import py
 import pytest
-import six
-
+import six
+
 import _pytest
-import _pytest._io
+import _pytest._io
 import _pytest.mark
-import _pytest.outcomes
+import _pytest.outcomes
 import _pytest.skipping
-from _pytest.warning_types import PytestUnhandledCoroutineWarning
+from _pytest.warning_types import PytestUnhandledCoroutineWarning
 
 from yatest_lib import test_splitter
@@ -63,9 +63,9 @@ yatest_logger = logging.getLogger("ya.test")
 _pytest.main.EXIT_NOTESTSCOLLECTED = 0
 SHUTDOWN_REQUESTED = False
 
-pytest_config = None
-
+pytest_config = None
+
 
 def configure_pdb_on_demand():
     import signal
@@ -128,7 +128,7 @@ def setup_logging(log_path, level=logging.DEBUG, *other_logs):
     root_logger = logging.getLogger()
     for i in range(len(root_logger.handlers) - 1, -1, -1):
         if isinstance(root_logger.handlers[i], YaTestLoggingFileHandler):
-            root_logger.handlers.pop(i).close()
+            root_logger.handlers.pop(i).close()
     root_logger.setLevel(level)
     for log_file in logs:
         file_handler = YaTestLoggingFileHandler(log_file)
@@ -182,13 +182,13 @@ def from_ya_test():
 
 
 def pytest_configure(config):
-    global pytest_config
-    pytest_config = config
-
+    global pytest_config
+    pytest_config = config
+
     config.option.continue_on_collection_errors = True
 
-    config.addinivalue_line("markers", "ya:external")
-
+    config.addinivalue_line("markers", "ya:external")
+
     config.from_ya_test = from_ya_test()
     config.test_logs = collections.defaultdict(dict)
     config.test_metrics = {}
@@ -248,8 +248,8 @@ def pytest_configure(config):
             os.environ[envvar] = os.environ[envvar + '_ORIGINAL']
 
     if config.option.root_dir:
-        config.rootdir = py.path.local(config.option.root_dir)
-        config.invocation_params = attr.evolve(config.invocation_params, dir=config.rootdir)
+        config.rootdir = py.path.local(config.option.root_dir)
+        config.invocation_params = attr.evolve(config.invocation_params, dir=config.rootdir)
 
     extra_sys_path = []
     # Arcadia paths from the test DEPENDS section of ya.make
@@ -334,7 +334,7 @@ def _get_rusage():
 def _collect_test_rusage(item):
     if resource and hasattr(item, "rusage"):
         finish_rusage = _get_rusage()
-        ya_inst = pytest_config.ya
+        ya_inst = pytest_config.ya
 
         def add_metric(attr_name, metric_name=None, modifier=None):
             if not metric_name:
@@ -369,37 +369,37 @@ def _get_item_tags(item):
     tags = []
     for key, value in item.keywords.items():
         if key == 'pytestmark' and isinstance(value, list):
-            for mark in value:
-                tags.append(mark.name)
-        elif isinstance(value, _pytest.mark.MarkDecorator):
+            for mark in value:
+                tags.append(mark.name)
+        elif isinstance(value, _pytest.mark.MarkDecorator):
             tags.append(key)
     return tags
 
 
 def pytest_runtest_setup(item):
     item.rusage = _get_rusage()
-    pytest_config.test_cores_count = 0
-    pytest_config.current_item_nodeid = item.nodeid
+    pytest_config.test_cores_count = 0
+    pytest_config.current_item_nodeid = item.nodeid
     class_name, test_name = tools.split_node_id(item.nodeid)
-    test_log_path = tools.get_test_log_file_path(pytest_config.ya.output_dir, class_name, test_name)
+    test_log_path = tools.get_test_log_file_path(pytest_config.ya.output_dir, class_name, test_name)
     setup_logging(
-        os.path.join(pytest_config.ya.output_dir, "run.log"),
-        pytest_config.option.test_log_level,
+        os.path.join(pytest_config.ya.output_dir, "run.log"),
+        pytest_config.option.test_log_level,
         test_log_path
     )
-    pytest_config.test_logs[item.nodeid]['log'] = test_log_path
-    pytest_config.test_logs[item.nodeid]['logsdir'] = pytest_config.ya.output_dir
-    pytest_config.current_test_log_path = test_log_path
-    pytest_config.current_test_name = "{}::{}".format(class_name, test_name)
+    pytest_config.test_logs[item.nodeid]['log'] = test_log_path
+    pytest_config.test_logs[item.nodeid]['logsdir'] = pytest_config.ya.output_dir
+    pytest_config.current_test_log_path = test_log_path
+    pytest_config.current_test_name = "{}::{}".format(class_name, test_name)
     separator = "#" * 100
     yatest_logger.info(separator)
     yatest_logger.info(test_name)
     yatest_logger.info(separator)
     yatest_logger.info("Test setup")
 
-    test_item = CrashedTestItem(item.nodeid, pytest_config.option.test_suffix)
-    pytest_config.ya_trace_reporter.on_start_test_class(test_item)
-    pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+    test_item = CrashedTestItem(item.nodeid, pytest_config.option.test_suffix)
+    pytest_config.ya_trace_reporter.on_start_test_class(test_item)
+    pytest_config.ya_trace_reporter.on_start_test_case(test_item)
 
 
 def pytest_runtest_teardown(item, nextitem):
@@ -412,7 +412,7 @@ def pytest_runtest_call(item):
 
 
 def pytest_deselected(items):
-    config = pytest_config
+    config = pytest_config
    if config.option.report_deselected:
         for item in items:
             deselected_item = DeselectedTestItem(item.nodeid, config.option.test_suffix)
@@ -428,7 +428,7 @@ def pytest_collection_modifyitems(items, config):
         filtered_items = []
         deselected_items = []
         for item in items:
-            canonical_node_id = str(CustomTestItem(item.nodeid, pytest_config.option.test_suffix))
+            canonical_node_id = str(CustomTestItem(item.nodeid, pytest_config.option.test_suffix))
             matched = False
             for flt in filters:
                 if "::" not in flt and "*" not in flt:
@@ -500,7 +500,7 @@ def pytest_collection_modifyitems(items, config):
     elif config.option.mode == yatest_lib.ya.RunMode.List:
         tests = []
         for item in items:
-            item = CustomTestItem(item.nodeid, pytest_config.option.test_suffix, item.keywords)
+            item = CustomTestItem(item.nodeid, pytest_config.option.test_suffix, item.keywords)
             record = {
                 "class": item.class_name,
                 "test": item.test_name,
@@ -516,9 +516,9 @@ def pytest_collection_modifyitems(items, config):
 
 def pytest_collectreport(report):
     if not report.passed:
-        if hasattr(pytest_config, 'ya_trace_reporter'):
-            test_item = TestItem(report, None, pytest_config.option.test_suffix)
-            pytest_config.ya_trace_reporter.on_error(test_item)
+        if hasattr(pytest_config, 'ya_trace_reporter'):
+            test_item = TestItem(report, None, pytest_config.option.test_suffix)
+            pytest_config.ya_trace_reporter.on_error(test_item)
         else:
             sys.stderr.write(yatest_lib.tools.to_utf8(report.longrepr))
 
@@ -526,60 +526,60 @@ def pytest_collectreport(report):
 @pytest.mark.tryfirst
 def pytest_pyfunc_call(pyfuncitem):
     testfunction = pyfuncitem.obj
-    iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
-    if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
-        msg = "Coroutine functions are not natively supported and have been skipped.\n"
-        msg += "You need to install a suitable plugin for your async framework, for example:\n"
-        msg += " - pytest-asyncio\n"
-        msg += " - pytest-trio\n"
-        msg += " - pytest-tornasync"
-        warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid)))
-        _pytest.outcomes.skip(msg="coroutine function and no async plugin installed (see warnings)")
-    funcargs = pyfuncitem.funcargs
-    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
-    pyfuncitem.retval = testfunction(**testargs)
+    iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
+    if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
+        msg = "Coroutine functions are not natively supported and have been skipped.\n"
+        msg += "You need to install a suitable plugin for your async framework, for example:\n"
+        msg += " - pytest-asyncio\n"
+        msg += " - pytest-trio\n"
+        msg += " - pytest-tornasync"
+        warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid)))
+        _pytest.outcomes.skip(msg="coroutine function and no async plugin installed (see warnings)")
+    funcargs = pyfuncitem.funcargs
+    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
+    pyfuncitem.retval = testfunction(**testargs)
     return True
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     def logreport(report, result, call):
-        test_item = TestItem(report, result, pytest_config.option.test_suffix)
-        if not pytest_config.suite_metrics and context.Ctx.get("YA_PYTEST_START_TIMESTAMP"):
-            pytest_config.suite_metrics["pytest_startup_duration"] = call.start - context.Ctx["YA_PYTEST_START_TIMESTAMP"]
-            pytest_config.ya_trace_reporter.dump_suite_metrics()
+        test_item = TestItem(report, result, pytest_config.option.test_suffix)
+        if not pytest_config.suite_metrics and context.Ctx.get("YA_PYTEST_START_TIMESTAMP"):
+            pytest_config.suite_metrics["pytest_startup_duration"] = call.start - context.Ctx["YA_PYTEST_START_TIMESTAMP"]
+            pytest_config.ya_trace_reporter.dump_suite_metrics()
 
-        pytest_config.ya_trace_reporter.on_log_report(test_item)
+        pytest_config.ya_trace_reporter.on_log_report(test_item)
         if report.outcome == "failed":
             yatest_logger.error(report.longrepr)
 
         if report.when == "call":
             _collect_test_rusage(item)
-            pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+            pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
         elif report.when == "setup":
-            pytest_config.ya_trace_reporter.on_start_test_class(test_item)
+            pytest_config.ya_trace_reporter.on_start_test_class(test_item)
             if report.outcome != "passed":
-                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
-                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
             else:
-                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
         elif report.when == "teardown":
             if report.outcome == "failed":
-                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
-                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_start_test_case(test_item)
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item)
             else:
-                pytest_config.ya_trace_reporter.on_finish_test_case(test_item, duration_only=True)
-            pytest_config.ya_trace_reporter.on_finish_test_class(test_item)
+                pytest_config.ya_trace_reporter.on_finish_test_case(test_item, duration_only=True)
+            pytest_config.ya_trace_reporter.on_finish_test_class(test_item)
 
     outcome = yield
     rep = outcome.get_result()
     result = None
     if hasattr(item, 'retval') and item.retval is not None:
         result = item.retval
-        if not pytest_config.from_ya_test:
-            ti = TestItem(rep, result, pytest_config.option.test_suffix)
-            tr = pytest_config.pluginmanager.getplugin('terminalreporter')
+        if not pytest_config.from_ya_test:
+            ti = TestItem(rep, result, pytest_config.option.test_suffix)
+            tr = pytest_config.pluginmanager.getplugin('terminalreporter')
             tr.write_line("{} - Validating canonical data is not supported when running standalone binary".format(ti), yellow=True, bold=True)
     logreport(rep, result, call)
@@ -604,12 +604,12 @@ def get_formatted_error(report):
 
 def colorize(longrepr):
     # use default pytest colorization
-    if pytest_config.option.tbstyle != "short":
+    if pytest_config.option.tbstyle != "short":
         io = py.io.TextIO()
-        if six.PY2:
-            writer = py.io.TerminalWriter(file=io)
-        else:
-            writer = _pytest._io.TerminalWriter(file=io)
+        if six.PY2:
+            writer = py.io.TerminalWriter(file=io)
+        else:
+            writer = _pytest._io.TerminalWriter(file=io)
         # enable colorization
         writer.hasmarkup = True
@@ -804,13 +804,13 @@ class TraceReportGenerator(object):
         pass
 
     def on_start_test_class(self, test_item):
-        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
         class_name = test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name
         self._current_test = (class_name, None)
         self.trace('test-started', {'class': class_name})
 
     def on_finish_test_class(self, test_item):
-        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
         self.trace('test-finished', {'class': test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name})
 
     def on_start_test_case(self, test_item):
@@ -820,9 +820,9 @@ class TraceReportGenerator(object):
             'class': class_name,
             'subtest': subtest_name,
         }
-        if test_item.nodeid in pytest_config.test_logs:
-            message['logs'] = pytest_config.test_logs[test_item.nodeid]
-        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
+        if test_item.nodeid in pytest_config.test_logs:
+            message['logs'] = pytest_config.test_logs[test_item.nodeid]
+        pytest_config.ya.set_test_item_node_id(test_item.nodeid)
         self._current_test = (class_name, subtest_name)
         self.trace('subtest-started', message)
@@ -849,12 +849,12 @@ class TraceReportGenerator(object):
             'status': test_item.status,
             'comment': comment,
             'result': result,
-            'metrics': pytest_config.test_metrics.get(test_item.nodeid),
+            'metrics': pytest_config.test_metrics.get(test_item.nodeid),
             'is_diff_test': 'diff_test' in test_item.keywords,
             'tags': _get_item_tags(test_item),
         }
-        if test_item.nodeid in pytest_config.test_logs:
-            message['logs'] = pytest_config.test_logs[test_item.nodeid]
+        if test_item.nodeid in pytest_config.test_logs:
+            message['logs'] = pytest_config.test_logs[test_item.nodeid]
 
         message['time'] = self._test_duration.get(test_item.nodeid, test_item.duration)
@@ -862,7 +862,7 @@ class TraceReportGenerator(object):
         self._test_messages[test_item.nodeid] = message
 
     def dump_suite_metrics(self):
-        message = {"metrics": pytest_config.suite_metrics}
+        message = {"metrics": pytest_config.suite_metrics}
         self.trace("suite-event", message)
 
     def on_error(self, test_item):
```
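Since this commit only restores authorship annotations, the removed lines are re-added unchanged; one of the more self-contained pieces is the coroutine guard in the `pytest_pyfunc_call` hunk. For context, a minimal standalone sketch of that pattern follows (detect a coroutine test function, warn, and refuse to call it). It assumes nothing beyond the Python standard library; the helper `call_test_function` and the sample tests are illustrative only and are not part of this commit or of `ya.py`.

```python
import inspect
import warnings


def call_test_function(testfunction, **testargs):
    """Illustrative stand-in for the coroutine guard in pytest_pyfunc_call."""
    if inspect.iscoroutinefunction(testfunction):
        # The plugin warns and then skips via _pytest.outcomes.skip();
        # this sketch just warns and returns without calling the function.
        warnings.warn(
            "Coroutine functions are not natively supported; install a plugin "
            "such as pytest-asyncio, pytest-trio or pytest-tornasync."
        )
        return None
    return testfunction(**testargs)


async def async_test():
    return 1


def sync_test():
    return 1


if __name__ == "__main__":
    print(call_test_function(sync_test))   # 1
    print(call_test_function(async_test))  # None, plus a UserWarning
```

In the real plugin the refusal goes through `_pytest.outcomes.skip`, so pytest records the test as skipped instead of silently passing an un-awaited coroutine.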