| author | iaz1607 <iaz1607@yandex-team.ru> | 2022-02-10 16:45:37 +0300 |
| --- | --- | --- |
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:37 +0300 |
| commit | 94e51c602b555459333b3c6ae92476c424c930bc (patch) | |
| tree | b2cc84ee7850122e7ccf51d0ea21e4fa7e7a5685 /library/python/pytest/plugins/ya.py | |
| parent | e5437feb4ac2d2dc044e1090b9312dde5ef197e0 (diff) | |
| download | ydb-94e51c602b555459333b3c6ae92476c424c930bc.tar.gz | |
Restoring authorship annotation for <iaz1607@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'library/python/pytest/plugins/ya.py')
| -rw-r--r-- | library/python/pytest/plugins/ya.py | 110 |
1 file changed, 55 insertions, 55 deletions
diff --git a/library/python/pytest/plugins/ya.py b/library/python/pytest/plugins/ya.py
index e396516740..1bde03042d 100644
--- a/library/python/pytest/plugins/ya.py
+++ b/library/python/pytest/plugins/ya.py
@@ -29,8 +29,8 @@
 import _pytest.skipping
 from _pytest.warning_types import PytestUnhandledCoroutineWarning
 
-from yatest_lib import test_splitter
-
+from yatest_lib import test_splitter
+
 try:
     import resource
 except ImportError:
@@ -54,8 +54,8 @@
 import yatest_lib.external as canon
 
 import yatest_lib.ya
 
-from library.python.pytest import context
-
+from library.python.pytest import context
+
 console_logger = logging.getLogger("console")
 yatest_logger = logging.getLogger("ya.test")
@@ -154,7 +154,7 @@ def pytest_addoption(parser):
     parser.addoption("--test-list-file", action="store", dest="test_list_file")
     parser.addoption("--modulo", default=1, type=int)
     parser.addoption("--modulo-index", default=0, type=int)
-    parser.addoption("--partition-mode", default='SEQUENTIAL', help="Split tests according to partitoin mode")
+    parser.addoption("--partition-mode", default='SEQUENTIAL', help="Split tests according to partitoin mode")
     parser.addoption("--split-by-tests", action='store_true', help="Split test execution by tests instead of suites", default=False)
     parser.addoption("--project-path", action="store", default="", help="path to CMakeList where test is declared")
     parser.addoption("--build-type", action="store", default="", help="build type")
@@ -174,7 +174,7 @@ def pytest_addoption(parser):
     parser.addoption("--report-deselected", action="store_true", dest="report_deselected", default=False, help="report deselected tests to the trace file")
     parser.addoption("--pdb-on-sigusr1", action="store_true", default=False, help="setup pdb.set_trace on SIGUSR1")
     parser.addoption("--test-tool-bin", help="Path to test_tool")
-    parser.addoption("--test-list-path", dest="test_list_path", action="store", help="path to test list", default="")
+    parser.addoption("--test-list-path", dest="test_list_path", action="store", help="path to test list", default="")
 
 
 def from_ya_test():
@@ -192,8 +192,8 @@ def pytest_configure(config):
     config.from_ya_test = from_ya_test()
     config.test_logs = collections.defaultdict(dict)
     config.test_metrics = {}
-    config.suite_metrics = {}
-    config.configure_timestamp = time.time()
+    config.suite_metrics = {}
+    config.configure_timestamp = time.time()
     context = {
         "project_path": config.option.project_path,
         "test_stderr": config.option.test_stderr,
@@ -443,53 +443,53 @@ def pytest_collection_modifyitems(items, config):
         config.hook.pytest_deselected(items=deselected_items)
         items[:] = filtered_items
 
-    def filter_by_full_name(filters):
-        filter_set = {flt for flt in filters}
-        filtered_items = []
-        deselected_items = []
-        for item in items:
-            if item.nodeid in filter_set:
-                filtered_items.append(item)
-            else:
-                deselected_items.append(item)
-
-        config.hook.pytest_deselected(items=deselected_items)
-        items[:] = filtered_items
-
+    def filter_by_full_name(filters):
+        filter_set = {flt for flt in filters}
+        filtered_items = []
+        deselected_items = []
+        for item in items:
+            if item.nodeid in filter_set:
+                filtered_items.append(item)
+            else:
+                deselected_items.append(item)
+
+        config.hook.pytest_deselected(items=deselected_items)
+        items[:] = filtered_items
+
     # XXX - check to be removed when tests for peerdirs don't run
     for item in items:
         if not item.nodeid:
             item._nodeid = os.path.basename(item.location[0])
-    if os.path.exists(config.option.test_list_path):
-        with open(config.option.test_list_path, 'r') as afile:
-            chunks = json.load(afile)
-        filters = chunks[config.option.modulo_index]
-        filter_by_full_name(filters)
-    else:
-        if config.option.test_filter:
-            filter_items(config.option.test_filter)
-        partition_mode = config.option.partition_mode
-        modulo = config.option.modulo
-        if modulo > 1:
-            items[:] = sorted(items, key=lambda item: item.nodeid)
-            modulo_index = config.option.modulo_index
-            split_by_tests = config.option.split_by_tests
-            items_by_classes = {}
-            res = []
-            for item in items:
-                if item.nodeid.count("::") == 2 and not split_by_tests:
-                    class_name = item.nodeid.rsplit("::", 1)[0]
-                    if class_name not in items_by_classes:
-                        items_by_classes[class_name] = []
-                        res.append(items_by_classes[class_name])
-                    items_by_classes[class_name].append(item)
-                else:
-                    res.append([item])
-            chunk_items = test_splitter.get_splitted_tests(res, modulo, modulo_index, partition_mode, is_sorted=True)
-            items[:] = []
-            for item in chunk_items:
-                items.extend(item)
-            yatest_logger.info("Modulo %s tests are: %s", modulo_index, chunk_items)
+    if os.path.exists(config.option.test_list_path):
+        with open(config.option.test_list_path, 'r') as afile:
+            chunks = json.load(afile)
+        filters = chunks[config.option.modulo_index]
+        filter_by_full_name(filters)
+    else:
+        if config.option.test_filter:
+            filter_items(config.option.test_filter)
+        partition_mode = config.option.partition_mode
+        modulo = config.option.modulo
+        if modulo > 1:
+            items[:] = sorted(items, key=lambda item: item.nodeid)
+            modulo_index = config.option.modulo_index
+            split_by_tests = config.option.split_by_tests
+            items_by_classes = {}
+            res = []
+            for item in items:
+                if item.nodeid.count("::") == 2 and not split_by_tests:
+                    class_name = item.nodeid.rsplit("::", 1)[0]
+                    if class_name not in items_by_classes:
+                        items_by_classes[class_name] = []
+                        res.append(items_by_classes[class_name])
+                    items_by_classes[class_name].append(item)
+                else:
+                    res.append([item])
+            chunk_items = test_splitter.get_splitted_tests(res, modulo, modulo_index, partition_mode, is_sorted=True)
+            items[:] = []
+            for item in chunk_items:
+                items.extend(item)
+            yatest_logger.info("Modulo %s tests are: %s", modulo_index, chunk_items)
 
     if config.option.mode == yatest_lib.ya.RunMode.Run:
         for item in items:
@@ -543,7 +543,7 @@ def pytest_pyfunc_call(pyfuncitem):
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
-    def logreport(report, result, call):
+    def logreport(report, result, call):
         test_item = TestItem(report, result, pytest_config.option.test_suffix)
         if not pytest_config.suite_metrics and context.Ctx.get("YA_PYTEST_START_TIMESTAMP"):
             pytest_config.suite_metrics["pytest_startup_duration"] = call.start - context.Ctx["YA_PYTEST_START_TIMESTAMP"]
@@ -861,10 +861,10 @@ class TraceReportGenerator(object):
         self.trace('subtest-finished', message)
         self._test_messages[test_item.nodeid] = message
 
-    def dump_suite_metrics(self):
+    def dump_suite_metrics(self):
         message = {"metrics": pytest_config.suite_metrics}
-        self.trace("suite-event", message)
-
+        self.trace("suite-event", message)
+
     def on_error(self, test_item):
         self.trace('chunk_event', {"errors": [(test_item.status, self._get_comment(test_item))]})
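For context on the restored `pytest_collection_modifyitems` branch above: the file passed via `--test-list-path` is read with `json.load`, indexed by `--modulo-index`, and the resulting list of node IDs is handed to `filter_by_full_name`. Below is a minimal sketch of that interplay; the file name and test IDs are invented for illustration and do not come from the commit.

```python
# Hypothetical illustration of the --test-list-path contents; names are made up.
import json

chunks = [
    ["test_math.py::TestAdd::test_small", "test_math.py::TestAdd::test_large"],  # chunk 0
    ["test_io.py::test_read", "test_io.py::test_write"],                         # chunk 1
]

with open("test_list.json", "w") as afile:
    json.dump(chunks, afile)

# With --test-list-path=test_list.json and --modulo-index=1 the plugin loads
# chunks[1] and keeps only items whose nodeid is in that list; everything else
# is reported through config.hook.pytest_deselected and dropped from `items`.
```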
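The splitting branch in the same hunk groups collected items before calling `test_splitter.get_splitted_tests`. The following is a simplified, self-contained restatement of that grouping rule with invented node IDs: unless `--split-by-tests` is set, IDs of the form `file::Class::test` are bucketed per class so a whole class lands in one chunk, while every other item becomes its own single-element group.

```python
# Simplified restatement of the grouping loop from the diff; node IDs are hypothetical.
items = [
    "test_a.py::TestFoo::test_one",
    "test_a.py::TestFoo::test_two",
    "test_b.py::test_standalone",
]

split_by_tests = False
items_by_classes = {}
res = []
for nodeid in sorted(items):
    if nodeid.count("::") == 2 and not split_by_tests:
        class_name = nodeid.rsplit("::", 1)[0]
        if class_name not in items_by_classes:
            items_by_classes[class_name] = []
            res.append(items_by_classes[class_name])
        items_by_classes[class_name].append(nodeid)
    else:
        res.append([nodeid])

print(res)
# [['test_a.py::TestFoo::test_one', 'test_a.py::TestFoo::test_two'],
#  ['test_b.py::test_standalone']]
```

These groups (`res`) are then distributed across chunks by `test_splitter.get_splitted_tests` according to `--partition-mode`, `--modulo` and `--modulo-index`.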
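The remaining restored lines concern `suite_metrics`: `logreport` records `pytest_startup_duration` as the gap between the timestamp stored in `context.Ctx["YA_PYTEST_START_TIMESTAMP"]` and the start of the first test call, and `dump_suite_metrics` traces the dict as a `suite-event` message. A rough sketch of that arithmetic and message shape, with assumed numbers:

```python
# Assumed values; only the subtraction and the payload shape follow the diff.
ya_pytest_start_timestamp = 1644000000.0  # would be set by the wrapper before pytest starts
first_call_start = 1644000002.5           # call.start of the first test report

suite_metrics = {"pytest_startup_duration": first_call_start - ya_pytest_start_timestamp}

# dump_suite_metrics then emits this payload as a "suite-event" trace message.
message = {"metrics": suite_metrics}
print(message)  # {'metrics': {'pytest_startup_duration': 2.5}}
```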