author      Aleksandr <ivansduck@gmail.com>              2022-02-10 16:47:52 +0300
committer   Daniil Cherednik <dcherednik@yandex-team.ru> 2022-02-10 16:47:52 +0300
commit      b05913d1c3c02a773578bceb7285084d2933ae86 (patch)
tree        c0748b5dcbade83af788c0abfa89c0383d6b779c /build/plugins
parent      ea6c5b7f172becca389cacaff7d5f45f6adccbe6 (diff)
download    ydb-b05913d1c3c02a773578bceb7285084d2933ae86.tar.gz
Restoring authorship annotation for Aleksandr <ivansduck@gmail.com>. Commit 2 of 2.
Diffstat (limited to 'build/plugins')
-rw-r--r--  build/plugins/_common.py                    14
-rw-r--r--  build/plugins/_requirements.py              24
-rw-r--r--  build/plugins/_test_const.py               282
-rw-r--r--  build/plugins/code_generator.py              2
-rw-r--r--  build/plugins/macros_with_error.py          34
-rw-r--r--  build/plugins/pybuild.py                   282
-rw-r--r--  build/plugins/res.py                        16
-rw-r--r--  build/plugins/suppressions.py               38
-rw-r--r--  build/plugins/tests/test_requirements.py     2
-rw-r--r--  build/plugins/ytest.py                     648
10 files changed, 671 insertions, 671 deletions
diff --git a/build/plugins/_common.py b/build/plugins/_common.py
index 7bf29b4d6f..2f831a94db 100644
--- a/build/plugins/_common.py
+++ b/build/plugins/_common.py
@@ -188,13 +188,13 @@ def filter_out_by_keyword(test_data, keyword):
def generate_chunks(lst, chunk_size):
for i in xrange(0, len(lst), chunk_size):
yield lst[i:(i + chunk_size)]
-
-
-def strip_roots(path):
- for prefix in ["$B/", "$S/"]:
- if path.startswith(prefix):
- return path[len(prefix):]
- return path
+
+
+def strip_roots(path):
+ for prefix in ["$B/", "$S/"]:
+ if path.startswith(prefix):
+ return path[len(prefix):]
+ return path
def to_yesno(x):
diff --git a/build/plugins/_requirements.py b/build/plugins/_requirements.py
index 11cb92ebe7..c27635e852 100644
--- a/build/plugins/_requirements.py
+++ b/build/plugins/_requirements.py
@@ -6,22 +6,22 @@ def check_cpu(suite_cpu_requirements, test_size, is_kvm=False):
max_cpu_requirements = consts.TestSize.get_max_requirements(test_size).get(consts.TestRequirements.Cpu)
if isinstance(suite_cpu_requirements, str):
if all(consts.TestRequirementsConstants.is_all_cpu(req) for req in (max_cpu_requirements, suite_cpu_requirements)):
- return None
- return "Wrong 'cpu' requirements: {}, should be in [{}..{}] for {}-size tests".format(suite_cpu_requirements, min_cpu_requirements, max_cpu_requirements, test_size)
+ return None
+ return "Wrong 'cpu' requirements: {}, should be in [{}..{}] for {}-size tests".format(suite_cpu_requirements, min_cpu_requirements, max_cpu_requirements, test_size)
if not isinstance(suite_cpu_requirements, int):
- return "Wrong 'cpu' requirements: {}, should be integer".format(suite_cpu_requirements)
+ return "Wrong 'cpu' requirements: {}, should be integer".format(suite_cpu_requirements)
if suite_cpu_requirements < min_cpu_requirements or suite_cpu_requirements > consts.TestRequirementsConstants.get_cpu_value(max_cpu_requirements):
- return "Wrong 'cpu' requirement: {}, should be in [{}..{}] for {}-size tests".format(suite_cpu_requirements, min_cpu_requirements, max_cpu_requirements, test_size)
+ return "Wrong 'cpu' requirement: {}, should be in [{}..{}] for {}-size tests".format(suite_cpu_requirements, min_cpu_requirements, max_cpu_requirements, test_size)
- return None
+ return None
# TODO: Remove is_kvm param when there will be guarantees on RAM
def check_ram(suite_ram_requirements, test_size, is_kvm=False):
if not isinstance(suite_ram_requirements, int):
- return "Wrong 'ram' requirements: {}, should be integer".format(suite_ram_requirements)
+ return "Wrong 'ram' requirements: {}, should be integer".format(suite_ram_requirements)
min_ram_requirements = consts.TestRequirementsConstants.MinRam
max_ram_requirements = consts.MAX_RAM_REQUIREMENTS_FOR_KVM if is_kvm else consts.TestSize.get_max_requirements(test_size).get(consts.TestRequirements.Ram)
if suite_ram_requirements < min_ram_requirements or suite_ram_requirements > max_ram_requirements:
@@ -29,7 +29,7 @@ def check_ram(suite_ram_requirements, test_size, is_kvm=False):
if is_kvm:
err_msg += ' with kvm requirements'
return err_msg
- return None
+ return None
def check_ram_disk(suite_ram_disk, test_size, is_kvm=False):
@@ -37,13 +37,13 @@ def check_ram_disk(suite_ram_disk, test_size, is_kvm=False):
max_ram_disk = consts.TestSize.get_max_requirements(test_size).get(consts.TestRequirements.RamDisk)
if isinstance(suite_ram_disk, str):
if all(consts.TestRequirementsConstants.is_all_ram_disk(req) for req in (max_ram_disk, suite_ram_disk)):
- return None
- return "Wrong 'ram_disk' requirements: {}, should be in [{}..{}] for {}-size tests".format(suite_ram_disk, 0, max_ram_disk, test_size)
+ return None
+ return "Wrong 'ram_disk' requirements: {}, should be in [{}..{}] for {}-size tests".format(suite_ram_disk, 0, max_ram_disk, test_size)
if not isinstance(suite_ram_disk, int):
- return "Wrong 'ram_disk' requirements: {}, should be integer".format(suite_ram_disk)
+ return "Wrong 'ram_disk' requirements: {}, should be integer".format(suite_ram_disk)
if suite_ram_disk < min_ram_disk or suite_ram_disk > consts.TestRequirementsConstants.get_ram_disk_value(max_ram_disk):
- return "Wrong 'ram_disk' requirement: {}, should be in [{}..{}] for {}-size tests".format(suite_ram_disk, min_ram_disk, max_ram_disk, test_size)
+ return "Wrong 'ram_disk' requirement: {}, should be in [{}..{}] for {}-size tests".format(suite_ram_disk, min_ram_disk, max_ram_disk, test_size)
- return None
+ return None
diff --git a/build/plugins/_test_const.py b/build/plugins/_test_const.py
index 33e9b989c3..0d03cc3d17 100644
--- a/build/plugins/_test_const.py
+++ b/build/plugins/_test_const.py
@@ -1,34 +1,34 @@
-# coding: utf-8
+# coding: utf-8
import re
import sys
-
-RESTART_TEST_INDICATOR = '##restart-test##'
-INFRASTRUCTURE_ERROR_INDICATOR = '##infrastructure-error##'
-
-RESTART_TEST_INDICATORS = [
- RESTART_TEST_INDICATOR,
- "network error",
-]
-
-# testing
-BIN_DIRECTORY = 'bin'
-CANONIZATION_RESULT_FILE_NAME = "canonization_res.json"
-CONSOLE_SNIPPET_LIMIT = 5000
-LIST_NODE_LOG_FILE = "test_list.log"
-LIST_NODE_RESULT_FILE = "test_list.json"
-LIST_RESULT_NODE_LOG_FILE = "list_result.log"
-MAX_FILE_SIZE = 1024 * 1024 * 2 # 2 MB
-MAX_TEST_RESTART_COUNT = 3
-REPORT_SNIPPET_LIMIT = 10000
-SANITIZER_ERROR_RC = 100
-TEST_SUBTEST_SEPARATOR = '::'
-TESTING_OUT_DIR_NAME = "testing_out_stuff"
+
+RESTART_TEST_INDICATOR = '##restart-test##'
+INFRASTRUCTURE_ERROR_INDICATOR = '##infrastructure-error##'
+
+RESTART_TEST_INDICATORS = [
+ RESTART_TEST_INDICATOR,
+ "network error",
+]
+
+# testing
+BIN_DIRECTORY = 'bin'
+CANONIZATION_RESULT_FILE_NAME = "canonization_res.json"
+CONSOLE_SNIPPET_LIMIT = 5000
+LIST_NODE_LOG_FILE = "test_list.log"
+LIST_NODE_RESULT_FILE = "test_list.json"
+LIST_RESULT_NODE_LOG_FILE = "list_result.log"
+MAX_FILE_SIZE = 1024 * 1024 * 2 # 2 MB
+MAX_TEST_RESTART_COUNT = 3
+REPORT_SNIPPET_LIMIT = 10000
+SANITIZER_ERROR_RC = 100
+TEST_SUBTEST_SEPARATOR = '::'
+TESTING_OUT_DIR_NAME = "testing_out_stuff"
TESTING_OUT_TAR_NAME = TESTING_OUT_DIR_NAME + ".tar"
-TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
-TRACE_FILE_NAME = "ytest.report.trace"
-TRUNCATING_IGNORE_FILE_LIST = {TRACE_FILE_NAME, "run_test.log"}
-
+TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
+TRACE_FILE_NAME = "ytest.report.trace"
+TRUNCATING_IGNORE_FILE_LIST = {TRACE_FILE_NAME, "run_test.log"}
+
# kvm
DEFAULT_RAM_REQUIREMENTS_FOR_KVM = 4
MAX_RAM_REQUIREMENTS_FOR_KVM = 16
@@ -37,29 +37,29 @@ MAX_RAM_REQUIREMENTS_FOR_KVM = 16
TEST_NODE_FINISHING_TIME = 5 * 60
DEFAULT_TEST_NODE_TIMEOUT = 15 * 60
-# coverage
+# coverage
COVERAGE_TESTS_TIMEOUT_FACTOR = 1.5
COVERAGE_RESOLVED_FILE_NAME_PATTERN = "coverage_resolved.{}.json"
CPP_COVERAGE_RESOLVED_FILE_NAME = COVERAGE_RESOLVED_FILE_NAME_PATTERN.format("cpp")
JAVA_COVERAGE_RESOLVED_FILE_NAME = COVERAGE_RESOLVED_FILE_NAME_PATTERN.format("java")
PYTHON_COVERAGE_RESOLVED_FILE_NAME = COVERAGE_RESOLVED_FILE_NAME_PATTERN.format("python")
CLANG_COVERAGE_TEST_TYPES = ("unittest", "coverage_extractor", "pytest", "py3test", "gtest", "boost_test", "exectest")
-COVERAGE_TABLE_CHUNKS = 20
-COVERAGE_YT_PROXY = "hahn.yt.yandex.net"
-COVERAGE_YT_ROOT_PATH = "//home/codecoverage"
-COVERAGE_YT_TABLE_PREFIX = "datatable"
-
-# fuzzing
-CORPUS_DATA_FILE_NAME = 'corpus.json'
-CORPUS_DATA_ROOT_DIR = 'fuzzing'
-CORPUS_DIR_NAME = 'corpus'
-FUZZING_COMPRESSION_COEF = 1.1
-FUZZING_DEFAULT_TIMEOUT = 3600
+COVERAGE_TABLE_CHUNKS = 20
+COVERAGE_YT_PROXY = "hahn.yt.yandex.net"
+COVERAGE_YT_ROOT_PATH = "//home/codecoverage"
+COVERAGE_YT_TABLE_PREFIX = "datatable"
+
+# fuzzing
+CORPUS_DATA_FILE_NAME = 'corpus.json'
+CORPUS_DATA_ROOT_DIR = 'fuzzing'
+CORPUS_DIR_NAME = 'corpus'
+FUZZING_COMPRESSION_COEF = 1.1
+FUZZING_DEFAULT_TIMEOUT = 3600
FUZZING_FINISHING_TIME = 600
-FUZZING_TIMEOUT_RE = re.compile(r'(^|\s)-max_total_time=(?P<max_time>\d+)')
-GENERATED_CORPUS_DIR_NAME = 'mined_corpus'
-MAX_CORPUS_RESOURCES_ALLOWED = 5
-
+FUZZING_TIMEOUT_RE = re.compile(r'(^|\s)-max_total_time=(?P<max_time>\d+)')
+GENERATED_CORPUS_DIR_NAME = 'mined_corpus'
+MAX_CORPUS_RESOURCES_ALLOWED = 5
+
TEST_TOOL_HOST = 'TEST_TOOL_HOST_RESOURCE_GLOBAL'
TEST_TOOL_TARGET = 'TEST_TOOL_TARGET_RESOURCE_GLOBAL'
TEST_TOOL_HOST_LOCAL = 'TEST_TOOL_HOST_LOCAL'
@@ -73,15 +73,15 @@ FLAKES_PY2_RESOURCE = 'FLAKES_PY2_RESOURCE_GLOBAL'
FLAKES_PY3_RESOURCE = 'FLAKES_PY3_RESOURCE_GLOBAL'
FLAKE8_PY2_RESOURCE = 'FLAKE8_PY2_RESOURCE_GLOBAL'
FLAKE8_PY3_RESOURCE = 'FLAKE8_PY3_RESOURCE_GLOBAL'
-
-
-class Enum(object):
-
- @classmethod
- def enumerate(cls):
- return [v for k, v in cls.__dict__.items() if not k.startswith("_")]
-
-
+
+
+class Enum(object):
+
+ @classmethod
+ def enumerate(cls):
+ return [v for k, v in cls.__dict__.items() if not k.startswith("_")]
+
+
class TestRequirements(Enum):
Container = 'container'
Cpu = 'cpu'
@@ -119,7 +119,7 @@ class TestRequirementsConstants(Enum):
return cls.AllRamDiskValue if cls.is_all_ram_disk(value) else value
-class TestSize(Enum):
+class TestSize(Enum):
Small = 'small'
Medium = 'medium'
Large = 'large'
@@ -172,7 +172,7 @@ class TestSize(Enum):
TestRequirements.RamDisk: 4,
},
Large: {
- TestRequirements.Cpu: 4,
+ TestRequirements.Cpu: 4,
TestRequirements.Ram: 32,
TestRequirements.RamDisk: 4,
},
@@ -207,14 +207,14 @@ class TestSize(Enum):
raise Exception("Unknown test size '{}'".format(size))
-class TestRunExitCode(Enum):
+class TestRunExitCode(Enum):
Skipped = 2
Failed = 3
TimeOut = 10
- InfrastructureError = 12
+ InfrastructureError = 12
-class YaTestTags(Enum):
+class YaTestTags(Enum):
Manual = "ya:manual"
Notags = "ya:notags"
Norestart = "ya:norestart"
@@ -228,100 +228,100 @@ class YaTestTags(Enum):
class Status(object):
- GOOD, XFAIL, FAIL, XPASS, MISSING, CRASHED, TIMEOUT = range(1, 8)
+ GOOD, XFAIL, FAIL, XPASS, MISSING, CRASHED, TIMEOUT = range(1, 8)
SKIPPED = -100
NOT_LAUNCHED = -200
CANON_DIFF = -300
DESELECTED = -400
INTERNAL = -sys.maxint
- FLAKY = -50
+ FLAKY = -50
BY_NAME = {'good': GOOD, 'fail': FAIL, 'xfail': XFAIL, 'xpass': XPASS, 'missing': MISSING, 'crashed': CRASHED,
'skipped': SKIPPED, 'flaky': FLAKY, 'not_launched': NOT_LAUNCHED, 'timeout': TIMEOUT, 'diff': CANON_DIFF,
'internal': INTERNAL, 'deselected': DESELECTED}
TO_STR = {GOOD: 'good', FAIL: 'fail', XFAIL: 'xfail', XPASS: 'xpass', MISSING: 'missing', CRASHED: 'crashed',
SKIPPED: 'skipped', FLAKY: 'flaky', NOT_LAUNCHED: 'not_launched', TIMEOUT: 'timeout', CANON_DIFF: 'diff',
INTERNAL: 'internal', DESELECTED: 'deselected'}
-
-
-class _Colors(object):
-
- _NAMES = [
- "blue",
- "cyan",
- "default",
- "green",
- "grey",
- "magenta",
- "red",
- "white",
- "yellow",
- ]
+
+
+class _Colors(object):
+
+ _NAMES = [
+ "blue",
+ "cyan",
+ "default",
+ "green",
+ "grey",
+ "magenta",
+ "red",
+ "white",
+ "yellow",
+ ]
_PREFIXES = ["", "light", "dark"]
-
- def __init__(self):
- self._table = {}
- for prefix in self._PREFIXES:
- for value in self._NAMES:
- name = value
- if prefix:
- name = "{}_{}".format(prefix, value)
- value = "{}-{}".format(prefix, value)
- self.__add_color(name.upper(), value)
-
- def __add_color(self, name, value):
- self._table[name] = value
- self.__setattr__(name, value)
-
-
-Colors = _Colors()
-
-
-class _Highlight(object):
-
- _MARKERS = {
- # special
- "RESET": "rst",
-
- "IMPORTANT": "imp",
- "UNIMPORTANT": "unimp",
- "BAD": "bad",
- "WARNING": "warn",
- "GOOD": "good",
- "PATH": "path",
- "ALTERNATIVE1": "alt1",
- "ALTERNATIVE2": "alt2",
- "ALTERNATIVE3": "alt3",
- }
-
- def __init__(self):
- # setting attributes because __getattr__ is much slower
- for attr, value in self._MARKERS.items():
- self.__setattr__(attr, value)
-
-
-Highlight = _Highlight()
-
-
-class _StatusColorMap(object):
-
- _MAP = {
- 'good': Highlight.GOOD,
- 'fail': Highlight.BAD,
- 'missing': Highlight.ALTERNATIVE1,
- 'crashed': Highlight.WARNING,
- 'skipped': Highlight.UNIMPORTANT,
- 'not_launched': Highlight.BAD,
- 'timeout': Highlight.BAD,
- 'flaky': Highlight.ALTERNATIVE3,
- 'xfail': Highlight.WARNING,
+
+ def __init__(self):
+ self._table = {}
+ for prefix in self._PREFIXES:
+ for value in self._NAMES:
+ name = value
+ if prefix:
+ name = "{}_{}".format(prefix, value)
+ value = "{}-{}".format(prefix, value)
+ self.__add_color(name.upper(), value)
+
+ def __add_color(self, name, value):
+ self._table[name] = value
+ self.__setattr__(name, value)
+
+
+Colors = _Colors()
+
+
+class _Highlight(object):
+
+ _MARKERS = {
+ # special
+ "RESET": "rst",
+
+ "IMPORTANT": "imp",
+ "UNIMPORTANT": "unimp",
+ "BAD": "bad",
+ "WARNING": "warn",
+ "GOOD": "good",
+ "PATH": "path",
+ "ALTERNATIVE1": "alt1",
+ "ALTERNATIVE2": "alt2",
+ "ALTERNATIVE3": "alt3",
+ }
+
+ def __init__(self):
+ # setting attributes because __getattr__ is much slower
+ for attr, value in self._MARKERS.items():
+ self.__setattr__(attr, value)
+
+
+Highlight = _Highlight()
+
+
+class _StatusColorMap(object):
+
+ _MAP = {
+ 'good': Highlight.GOOD,
+ 'fail': Highlight.BAD,
+ 'missing': Highlight.ALTERNATIVE1,
+ 'crashed': Highlight.WARNING,
+ 'skipped': Highlight.UNIMPORTANT,
+ 'not_launched': Highlight.BAD,
+ 'timeout': Highlight.BAD,
+ 'flaky': Highlight.ALTERNATIVE3,
+ 'xfail': Highlight.WARNING,
'xpass': Highlight.WARNING,
- 'diff': Highlight.BAD,
- 'internal': Highlight.BAD,
- 'deselected': Highlight.UNIMPORTANT,
- }
-
- def __getitem__(self, item):
- return self._MAP[item]
-
-
-StatusColorMap = _StatusColorMap()
+ 'diff': Highlight.BAD,
+ 'internal': Highlight.BAD,
+ 'deselected': Highlight.UNIMPORTANT,
+ }
+
+ def __getitem__(self, item):
+ return self._MAP[item]
+
+
+StatusColorMap = _StatusColorMap()
diff --git a/build/plugins/code_generator.py b/build/plugins/code_generator.py
index e527c2b8bb..ca8bb18c15 100644
--- a/build/plugins/code_generator.py
+++ b/build/plugins/code_generator.py
@@ -3,7 +3,7 @@ import os
import _import_wrapper as iw
-pattern = re.compile(r"#include\s*[<\"](?P<INDUCED>[^>\"]+)[>\"]|(?:@|{@)\s*(?:import|include|from)\s*[\"'](?P<INCLUDE>[^\"']+)[\"']")
+pattern = re.compile(r"#include\s*[<\"](?P<INDUCED>[^>\"]+)[>\"]|(?:@|{@)\s*(?:import|include|from)\s*[\"'](?P<INCLUDE>[^\"']+)[\"']")
class CodeGeneratorTemplateParser(object):
diff --git a/build/plugins/macros_with_error.py b/build/plugins/macros_with_error.py
index 4cd0544d18..e82fb56d2c 100644
--- a/build/plugins/macros_with_error.py
+++ b/build/plugins/macros_with_error.py
@@ -1,26 +1,26 @@
import sys
-import _common
+import _common
+
+import ymake
+
-import ymake
-
-
def onmacros_with_error(unit, *args):
print >> sys.stderr, 'This macros will fail'
raise Exception('Expected fail in MACROS_WITH_ERROR')
-
-
-def onrestrict_path(unit, *args):
- if args:
- if 'MSG' in args:
- pos = args.index('MSG')
- paths, msg = args[:pos], args[pos + 1:]
- msg = ' '.join(msg)
- else:
- paths, msg = args, 'forbidden'
- if not _common.strip_roots(unit.path()).startswith(paths):
- error_msg = "Path '[[imp]]{}[[rst]]' is restricted - [[bad]]{}[[rst]]. Valid path prefixes are: [[unimp]]{}[[rst]]".format(unit.path(), msg, ', '.join(paths))
- ymake.report_configure_error(error_msg)
+
+
+def onrestrict_path(unit, *args):
+ if args:
+ if 'MSG' in args:
+ pos = args.index('MSG')
+ paths, msg = args[:pos], args[pos + 1:]
+ msg = ' '.join(msg)
+ else:
+ paths, msg = args, 'forbidden'
+ if not _common.strip_roots(unit.path()).startswith(paths):
+ error_msg = "Path '[[imp]]{}[[rst]]' is restricted - [[bad]]{}[[rst]]. Valid path prefixes are: [[unimp]]{}[[rst]]".format(unit.path(), msg, ', '.join(paths))
+ ymake.report_configure_error(error_msg)
def onassert(unit, *args):
val = unit.get(args[0])
diff --git a/build/plugins/pybuild.py b/build/plugins/pybuild.py
index 1e18f14051..f32a2d39a0 100644
--- a/build/plugins/pybuild.py
+++ b/build/plugins/pybuild.py
@@ -1,7 +1,7 @@
-import os
-import collections
+import os
+import collections
from hashlib import md5
-
+
import ymake
from _common import stripext, rootrel_arc_src, tobuilddir, listid, resolve_to_ymake_path, generate_chunks, pathid
@@ -61,55 +61,55 @@ def mangle(name):
return ''.join('{}{}'.format(len(s), s) for s in name.split('.'))
-def parse_pyx_includes(filename, path, source_root, seen=None):
- normpath = lambda *x: os.path.normpath(os.path.join(*x))
-
- abs_path = normpath(source_root, filename)
- seen = seen or set()
- if abs_path in seen:
- return
- seen.add(abs_path)
-
- if not os.path.exists(abs_path):
- # File might be missing, because it might be generated
- return
-
- with open(abs_path, 'rb') as f:
- # Don't parse cimports and etc - irrelevant for cython, it's linker work
+def parse_pyx_includes(filename, path, source_root, seen=None):
+ normpath = lambda *x: os.path.normpath(os.path.join(*x))
+
+ abs_path = normpath(source_root, filename)
+ seen = seen or set()
+ if abs_path in seen:
+ return
+ seen.add(abs_path)
+
+ if not os.path.exists(abs_path):
+ # File might be missing, because it might be generated
+ return
+
+ with open(abs_path, 'rb') as f:
+ # Don't parse cimports and etc - irrelevant for cython, it's linker work
includes = ymake.parse_cython_includes(f.read())
-
- abs_dirname = os.path.dirname(abs_path)
- # All includes are relative to the file which include
- path_dirname = os.path.dirname(path)
- file_dirname = os.path.dirname(filename)
-
- for incfile in includes:
- abs_path = normpath(abs_dirname, incfile)
- if os.path.exists(abs_path):
- incname, incpath = normpath(file_dirname, incfile), normpath(path_dirname, incfile)
- yield (incname, incpath)
- # search for includes in the included files
- for e in parse_pyx_includes(incname, incpath, source_root, seen):
- yield e
- else:
- # There might be arcadia root or cython relative include.
- # Don't treat such file as missing, because there must be PEERDIR on py_library
- # which contains it.
- for path in [
- source_root,
- source_root + "/contrib/tools/cython/Cython/Includes",
- ]:
- if os.path.exists(normpath(path, incfile)):
- break
- else:
- ymake.report_configure_error("'{}' includes missing file: {} ({})".format(path, incfile, abs_path))
-
+
+ abs_dirname = os.path.dirname(abs_path)
+ # All includes are relative to the file which include
+ path_dirname = os.path.dirname(path)
+ file_dirname = os.path.dirname(filename)
+
+ for incfile in includes:
+ abs_path = normpath(abs_dirname, incfile)
+ if os.path.exists(abs_path):
+ incname, incpath = normpath(file_dirname, incfile), normpath(path_dirname, incfile)
+ yield (incname, incpath)
+ # search for includes in the included files
+ for e in parse_pyx_includes(incname, incpath, source_root, seen):
+ yield e
+ else:
+ # There might be arcadia root or cython relative include.
+ # Don't treat such file as missing, because there must be PEERDIR on py_library
+ # which contains it.
+ for path in [
+ source_root,
+ source_root + "/contrib/tools/cython/Cython/Includes",
+ ]:
+ if os.path.exists(normpath(path, incfile)):
+ break
+ else:
+ ymake.report_configure_error("'{}' includes missing file: {} ({})".format(path, incfile, abs_path))
+
def has_pyx(args):
return any(arg.endswith('.pyx') for arg in args)
-
-def get_srcdir(path, unit):
- return rootrel_arc_src(path, unit)[:-len(path)].rstrip('/')
-
+
+def get_srcdir(path, unit):
+ return rootrel_arc_src(path, unit)[:-len(path)].rstrip('/')
+
def add_python_lint_checks(unit, py_ver, files):
def get_resolved_files():
resolved_files = []
@@ -119,27 +119,27 @@ def add_python_lint_checks(unit, py_ver, files):
resolved_files.append(resolved)
return resolved_files
- if unit.get('LINT_LEVEL_VALUE') == "none":
-
- no_lint_allowed_paths = (
- "contrib/",
- "devtools/",
- "junk/",
- # temporary allowed, TODO: remove
- "taxi/uservices/",
- "travel/",
+ if unit.get('LINT_LEVEL_VALUE') == "none":
+
+ no_lint_allowed_paths = (
+ "contrib/",
+ "devtools/",
+ "junk/",
+ # temporary allowed, TODO: remove
+ "taxi/uservices/",
+ "travel/",
"market/report/lite/", # MARKETOUT-38662, deadline: 2021-08-12
"passport/backend/oauth/", # PASSP-35982
- )
-
- upath = unit.path()[3:]
-
- if not upath.startswith(no_lint_allowed_paths):
- ymake.report_configure_error("NO_LINT() is allowed only in " + ", ".join(no_lint_allowed_paths))
-
- if files and unit.get('LINT_LEVEL_VALUE') not in ("none", "none_internal"):
+ )
+
+ upath = unit.path()[3:]
+
+ if not upath.startswith(no_lint_allowed_paths):
+ ymake.report_configure_error("NO_LINT() is allowed only in " + ", ".join(no_lint_allowed_paths))
+
+ if files and unit.get('LINT_LEVEL_VALUE') not in ("none", "none_internal"):
resolved_files = get_resolved_files()
- flake8_cfg = 'build/config/tests/flake8/flake8.conf'
+ flake8_cfg = 'build/config/tests/flake8/flake8.conf'
unit.onadd_check(["flake8.py{}".format(py_ver), flake8_cfg] + resolved_files)
@@ -214,14 +214,14 @@ def onpy_srcs(unit, *args):
ns = ""
else:
ns = (unit.get('PY_NAMESPACE_VALUE') or upath.replace('/', '.')) + '.'
-
- cython_coverage = unit.get('CYTHON_COVERAGE') == 'yes'
+
+ cython_coverage = unit.get('CYTHON_COVERAGE') == 'yes'
cythonize_py = False
optimize_proto = unit.get('OPTIMIZE_PY_PROTOS_FLAG') == 'yes'
-
+
cython_directives = []
- if cython_coverage:
- cython_directives += ['-X', 'linetrace=True']
+ if cython_coverage:
+ cython_directives += ['-X', 'linetrace=True']
pyxs_c = []
pyxs_c_h = []
@@ -353,41 +353,41 @@ def onpy_srcs(unit, *args):
dump_output.close()
if pyxs:
- files2res = set()
- # Include map stores files which were included in the processing pyx file,
- # to be able to find source code of the included file inside generated file
- # for currently processing pyx file.
- include_map = collections.defaultdict(set)
-
- if cython_coverage:
- def process_pyx(filename, path, out_suffix, noext):
- # skip generated files
- if not is_arc_src(path, unit):
- return
- # source file
- files2res.add((filename, path))
- # generated
- if noext:
- files2res.add((os.path.splitext(filename)[0] + out_suffix, os.path.splitext(path)[0] + out_suffix))
- else:
- files2res.add((filename + out_suffix, path + out_suffix))
- # used includes
- for entry in parse_pyx_includes(filename, path, unit.resolve('$S')):
- files2res.add(entry)
- include_arc_rel = entry[0]
- include_map[filename].add(include_arc_rel)
- else:
- def process_pyx(filename, path, out_suffix, noext):
- pass
-
- for pyxs, cython, out_suffix, noext in [
+ files2res = set()
+ # Include map stores files which were included in the processing pyx file,
+ # to be able to find source code of the included file inside generated file
+ # for currently processing pyx file.
+ include_map = collections.defaultdict(set)
+
+ if cython_coverage:
+ def process_pyx(filename, path, out_suffix, noext):
+ # skip generated files
+ if not is_arc_src(path, unit):
+ return
+ # source file
+ files2res.add((filename, path))
+ # generated
+ if noext:
+ files2res.add((os.path.splitext(filename)[0] + out_suffix, os.path.splitext(path)[0] + out_suffix))
+ else:
+ files2res.add((filename + out_suffix, path + out_suffix))
+ # used includes
+ for entry in parse_pyx_includes(filename, path, unit.resolve('$S')):
+ files2res.add(entry)
+ include_arc_rel = entry[0]
+ include_map[filename].add(include_arc_rel)
+ else:
+ def process_pyx(filename, path, out_suffix, noext):
+ pass
+
+ for pyxs, cython, out_suffix, noext in [
(pyxs_c, unit.on_buildwith_cython_c_dep, ".c", False),
(pyxs_c_h, unit.on_buildwith_cython_c_h, ".c", True),
(pyxs_c_api_h, unit.on_buildwith_cython_c_api_h, ".c", True),
(pyxs_cpp, unit.on_buildwith_cython_cpp_dep, ".cpp", False),
]:
for path, mod in pyxs:
- filename = rootrel_arc_src(path, unit)
+ filename = rootrel_arc_src(path, unit)
cython_args = [path]
dep = path
@@ -400,26 +400,26 @@ def onpy_srcs(unit, *args):
cython_args += [
'--module-name', mod,
'--init-suffix', mangle(mod),
- '--source-root', '${ARCADIA_ROOT}',
- # set arcadia root relative __file__ for generated modules
- '-X', 'set_initial_path={}'.format(filename),
+ '--source-root', '${ARCADIA_ROOT}',
+ # set arcadia root relative __file__ for generated modules
+ '-X', 'set_initial_path={}'.format(filename),
] + cython_directives
cython(cython_args)
py_register(unit, mod, py3)
- process_pyx(filename, path, out_suffix, noext)
-
- if files2res:
- # Compile original and generated sources into target for proper cython coverage calculation
- unit.onresource_files([x for name, path in files2res for x in ('DEST', name, path)])
-
- if include_map:
- data = []
- prefix = 'resfs/cython/include'
- for line in sorted('{}/{}={}'.format(prefix, filename, ':'.join(sorted(files))) for filename, files in include_map.iteritems()):
- data += ['-', line]
- unit.onresource(data)
-
+ process_pyx(filename, path, out_suffix, noext)
+
+ if files2res:
+ # Compile original and generated sources into target for proper cython coverage calculation
+ unit.onresource_files([x for name, path in files2res for x in ('DEST', name, path)])
+
+ if include_map:
+ data = []
+ prefix = 'resfs/cython/include'
+ for line in sorted('{}/{}={}'.format(prefix, filename, ':'.join(sorted(files))) for filename, files in include_map.iteritems()):
+ data += ['-', line]
+ unit.onresource(data)
+
for swigs, on_swig_python in [
(swigs_c, unit.on_swig_python_c),
(swigs_cpp, unit.on_swig_python_cpp),
@@ -433,11 +433,11 @@ def onpy_srcs(unit, *args):
onpy_srcs(unit, swg_py + '=' + mod)
if pys:
- pys_seen = set()
- pys_dups = {m for _, m in pys if (m in pys_seen or pys_seen.add(m))}
- if pys_dups:
- ymake.report_configure_error('Duplicate(s) is found in the PY_SRCS macro: {}'.format(pys_dups))
-
+ pys_seen = set()
+ pys_dups = {m for _, m in pys if (m in pys_seen or pys_seen.add(m))}
+ if pys_dups:
+ ymake.report_configure_error('Duplicate(s) is found in the PY_SRCS macro: {}'.format(pys_dups))
+
res = []
if py3:
@@ -523,10 +523,10 @@ def onpy_srcs(unit, *args):
def _check_test_srcs(*args):
- used = set(args) & {"NAMESPACE", "TOP_LEVEL", "__main__.py"}
- if used:
- param = list(used)[0]
- ymake.report_configure_error('in TEST_SRCS: you cannot use {} here - it would broke testing machinery'.format(param))
+ used = set(args) & {"NAMESPACE", "TOP_LEVEL", "__main__.py"}
+ if used:
+ param = list(used)[0]
+ ymake.report_configure_error('in TEST_SRCS: you cannot use {} here - it would broke testing machinery'.format(param))
def ontest_srcs(unit, *args):
@@ -606,21 +606,21 @@ def onpy_main(unit, arg):
arg += ':main'
py_main(unit, arg)
-
-
-def onpy_constructor(unit, arg):
- """
- @usage: PY_CONSTRUCTOR(package.module[:func])
-
- Specifies the module or function which will be started before python's main()
- init() is expected in the target module if no function is specified
- Can be considered as __attribute__((constructor)) for python
- """
- if ':' not in arg:
- arg = arg + '=init'
- else:
- arg[arg.index(':')] = '='
- unit.onresource(['-', 'py/constructors/{}'.format(arg)])
+
+
+def onpy_constructor(unit, arg):
+ """
+ @usage: PY_CONSTRUCTOR(package.module[:func])
+
+ Specifies the module or function which will be started before python's main()
+ init() is expected in the target module if no function is specified
+ Can be considered as __attribute__((constructor)) for python
+ """
+ if ':' not in arg:
+ arg = arg + '=init'
+ else:
+ arg[arg.index(':')] = '='
+ unit.onresource(['-', 'py/constructors/{}'.format(arg)])
def onpy_enums_serialization(unit, *args):
ns = ''
diff --git a/build/plugins/res.py b/build/plugins/res.py
index fccfb51eb5..a937caba81 100644
--- a/build/plugins/res.py
+++ b/build/plugins/res.py
@@ -9,7 +9,7 @@ def split(lst, limit):
filepath = None
lenght = 0
bucket = []
-
+
for item in lst:
if filepath:
lenght += root_lenght + len(filepath) + len(item)
@@ -17,17 +17,17 @@ def split(lst, limit):
yield bucket
bucket = []
lenght = 0
-
+
bucket.append(filepath)
bucket.append(item)
filepath = None
else:
filepath = item
-
+
if bucket:
yield bucket
-
-
+
+
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
@@ -38,8 +38,8 @@ def onfat_resource(unit, *args):
unit.onpeerdir(['library/cpp/resource'])
# Since the maximum length of lpCommandLine string for CreateProcess is 8kb (windows) characters,
- # we make several calls of rescompiler
- # https://msdn.microsoft.com/ru-ru/library/windows/desktop/ms682425.aspx
+ # we make several calls of rescompiler
+ # https://msdn.microsoft.com/ru-ru/library/windows/desktop/ms682425.aspx
for part_args in split(args, 8000):
output = listid(part_args) + '.cpp'
inputs = [x for x, y in iterpair(part_args) if x != '-']
@@ -47,7 +47,7 @@ def onfat_resource(unit, *args):
inputs = ['IN'] + inputs
unit.onrun_program(['tools/rescompiler', output] + part_args + inputs + ['OUT_NOAUTO', output])
- unit.onsrcs(['GLOBAL', output])
+ unit.onsrcs(['GLOBAL', output])
def onresource_files(unit, *args):
diff --git a/build/plugins/suppressions.py b/build/plugins/suppressions.py
index a5e6bd2188..6f4a1b4f03 100644
--- a/build/plugins/suppressions.py
+++ b/build/plugins/suppressions.py
@@ -1,19 +1,19 @@
-def onsuppressions(unit, *args):
- """
- SUPPRESSIONS() - allows to specify files with suppression notation which will be used by
- address, leak or thread sanitizer runtime by default.
- Use asan.supp filename for address sanitizer, lsan.supp for leak sanitizer
- and tsan.supp for thread sanitizer suppressions respectively.
- See https://clang.llvm.org/docs/AddressSanitizer.html#suppressing-memory-leaks
- for details.
- """
- import os
-
- valid = ("asan.supp", "tsan.supp", "lsan.supp")
-
- if unit.get("SANITIZER_TYPE") in ("leak", "address", "thread"):
- for x in args:
- if os.path.basename(x) not in valid:
- unit.message(['error', "Invalid suppression filename: {} (any of the following is expected: {})".format(x, valid)])
- return
- unit.onsrcs(["GLOBAL"] + list(args))
+def onsuppressions(unit, *args):
+ """
+ SUPPRESSIONS() - allows to specify files with suppression notation which will be used by
+ address, leak or thread sanitizer runtime by default.
+ Use asan.supp filename for address sanitizer, lsan.supp for leak sanitizer
+ and tsan.supp for thread sanitizer suppressions respectively.
+ See https://clang.llvm.org/docs/AddressSanitizer.html#suppressing-memory-leaks
+ for details.
+ """
+ import os
+
+ valid = ("asan.supp", "tsan.supp", "lsan.supp")
+
+ if unit.get("SANITIZER_TYPE") in ("leak", "address", "thread"):
+ for x in args:
+ if os.path.basename(x) not in valid:
+ unit.message(['error', "Invalid suppression filename: {} (any of the following is expected: {})".format(x, valid)])
+ return
+ unit.onsrcs(["GLOBAL"] + list(args))
diff --git a/build/plugins/tests/test_requirements.py b/build/plugins/tests/test_requirements.py
index 7d1a9b98b1..24d57ac901 100644
--- a/build/plugins/tests/test_requirements.py
+++ b/build/plugins/tests/test_requirements.py
@@ -32,7 +32,7 @@ class TestRequirements(object):
assert not requirements.check_ram(1, test_size)
assert not requirements.check_ram(4, test_size)
assert not requirements.check_ram(5, test_size)
- assert not requirements.check_ram(32, consts.TestSize.Large)
+ assert not requirements.check_ram(32, consts.TestSize.Large)
assert requirements.check_ram(48, consts.TestSize.Large)
assert not requirements.check_ram(1, test_size, is_kvm=True)
diff --git a/build/plugins/ytest.py b/build/plugins/ytest.py
index f58d00c99c..8970837f0f 100644
--- a/build/plugins/ytest.py
+++ b/build/plugins/ytest.py
@@ -16,20 +16,20 @@ import collections
import ymake
-MDS_URI_PREFIX = 'https://storage.yandex-team.ru/get-devtools/'
-MDS_SHEME = 'mds'
-CANON_DATA_DIR_NAME = 'canondata'
-CANON_OUTPUT_STORAGE = 'canondata_storage'
-CANON_RESULT_FILE_NAME = 'result.json'
-CANON_MDS_RESOURCE_REGEX = re.compile(re.escape(MDS_URI_PREFIX) + r'(.*?)($|#)')
-CANON_SB_VAULT_REGEX = re.compile(r"\w+=(value|file):[-\w]+:\w+")
-CANON_SBR_RESOURCE_REGEX = re.compile(r'(sbr:/?/?(\d+))')
-
+MDS_URI_PREFIX = 'https://storage.yandex-team.ru/get-devtools/'
+MDS_SHEME = 'mds'
+CANON_DATA_DIR_NAME = 'canondata'
+CANON_OUTPUT_STORAGE = 'canondata_storage'
+CANON_RESULT_FILE_NAME = 'result.json'
+CANON_MDS_RESOURCE_REGEX = re.compile(re.escape(MDS_URI_PREFIX) + r'(.*?)($|#)')
+CANON_SB_VAULT_REGEX = re.compile(r"\w+=(value|file):[-\w]+:\w+")
+CANON_SBR_RESOURCE_REGEX = re.compile(r'(sbr:/?/?(\d+))')
+
VALID_NETWORK_REQUIREMENTS = ("full", "restricted")
VALID_DNS_REQUIREMENTS = ("default", "local", "dns64")
BLOCK_SEPARATOR = '============================================================='
-SPLIT_FACTOR_MAX_VALUE = 1000
-SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
+SPLIT_FACTOR_MAX_VALUE = 1000
+SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
DEFAULT_TIDY_CONFIG = "build/config/tests/clang_tidy/config.yaml"
DEFAULT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_default_map.json"
@@ -69,65 +69,65 @@ def is_yt_spec_contain_pool_info(filename): # XXX switch to yson in ymake + per
return pool_re.search(yt_spec) and cypress_root_re.search(yt_spec)
-def validate_sb_vault(name, value):
- if not CANON_SB_VAULT_REGEX.match(value):
- return "sb_vault value '{}' should follow pattern <ENV_NAME>=:<value|file>:<owner>:<vault key>".format(value)
-
-
-def validate_numerical_requirement(name, value):
- if mr.resolve_value(value) is None:
- return "Cannot convert [[imp]]{}[[rst]] to the proper [[imp]]{}[[rst]] requirement value".format(value, name)
-
-
-def validate_choice_requirement(name, val, valid):
- if val not in valid:
- return "Unknown [[imp]]{}[[rst]] requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(name, val, ", ".join(valid))
-
-
+def validate_sb_vault(name, value):
+ if not CANON_SB_VAULT_REGEX.match(value):
+ return "sb_vault value '{}' should follow pattern <ENV_NAME>=:<value|file>:<owner>:<vault key>".format(value)
+
+
+def validate_numerical_requirement(name, value):
+ if mr.resolve_value(value) is None:
+ return "Cannot convert [[imp]]{}[[rst]] to the proper [[imp]]{}[[rst]] requirement value".format(value, name)
+
+
+def validate_choice_requirement(name, val, valid):
+ if val not in valid:
+ return "Unknown [[imp]]{}[[rst]] requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(name, val, ", ".join(valid))
+
+
def validate_force_sandbox_requirement(name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, check_func):
if is_force_sandbox or not in_autocheck or is_fuzzing or is_ytexec_run:
- if value == 'all':
- return
- return validate_numerical_requirement(name, value)
- error_msg = validate_numerical_requirement(name, value)
- if error_msg:
- return error_msg
+ if value == 'all':
+ return
+ return validate_numerical_requirement(name, value)
+ error_msg = validate_numerical_requirement(name, value)
+ if error_msg:
+ return error_msg
return check_func(mr.resolve_value(value), test_size, is_kvm)
-
-
+
+
# TODO: Remove is_kvm param when there will be guarantees on RAM
def validate_requirement(req_name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run):
- req_checks = {
- 'container': validate_numerical_requirement,
+ req_checks = {
+ 'container': validate_numerical_requirement,
'cpu': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_cpu),
- 'disk_usage': validate_numerical_requirement,
- 'dns': lambda n, v: validate_choice_requirement(n, v, VALID_DNS_REQUIREMENTS),
- 'kvm': None,
- 'network': lambda n, v: validate_choice_requirement(n, v, VALID_NETWORK_REQUIREMENTS),
+ 'disk_usage': validate_numerical_requirement,
+ 'dns': lambda n, v: validate_choice_requirement(n, v, VALID_DNS_REQUIREMENTS),
+ 'kvm': None,
+ 'network': lambda n, v: validate_choice_requirement(n, v, VALID_NETWORK_REQUIREMENTS),
'ram': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_ram),
'ram_disk': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_ram_disk),
- 'sb': None,
- 'sb_vault': validate_sb_vault,
- }
-
- if req_name not in req_checks:
- return "Unknown requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(req_name, ", ".join(sorted(req_checks)))
-
- if req_name in ('container', 'disk') and not is_force_sandbox:
- return "Only [[imp]]LARGE[[rst]] tests without [[imp]]ya:force_distbuild[[rst]] tag can have [[imp]]{}[[rst]] requirement".format(req_name)
-
- check_func = req_checks[req_name]
- if check_func:
- return check_func(req_name, value)
-
-
-def validate_test(unit, kw):
+ 'sb': None,
+ 'sb_vault': validate_sb_vault,
+ }
+
+ if req_name not in req_checks:
+ return "Unknown requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(req_name, ", ".join(sorted(req_checks)))
+
+ if req_name in ('container', 'disk') and not is_force_sandbox:
+ return "Only [[imp]]LARGE[[rst]] tests without [[imp]]ya:force_distbuild[[rst]] tag can have [[imp]]{}[[rst]] requirement".format(req_name)
+
+ check_func = req_checks[req_name]
+ if check_func:
+ return check_func(req_name, value)
+
+
+def validate_test(unit, kw):
def get_list(key):
return deserialize_list(kw.get(key, ""))
valid_kw = copy.deepcopy(kw)
errors = []
- warnings = []
+ warnings = []
if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
@@ -143,66 +143,66 @@ def validate_test(unit, kw):
size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
# TODO: use set instead list
tags = get_list("TAG")
- requirements_orig = get_list("REQUIREMENTS")
+ requirements_orig = get_list("REQUIREMENTS")
in_autocheck = "ya:not_autocheck" not in tags and 'ya:manual' not in tags
- is_fat = 'ya:fat' in tags
- is_force_sandbox = 'ya:force_distbuild' not in tags and is_fat
+ is_fat = 'ya:fat' in tags
+ is_force_sandbox = 'ya:force_distbuild' not in tags and is_fat
is_ytexec_run = 'ya:yt' in tags
- is_fuzzing = valid_kw.get("FUZZING", False)
- is_kvm = 'kvm' in requirements_orig
+ is_fuzzing = valid_kw.get("FUZZING", False)
+ is_kvm = 'kvm' in requirements_orig
requirements = {}
- list_requirements = ('sb_vault')
- for req in requirements_orig:
+ list_requirements = ('sb_vault')
+ for req in requirements_orig:
if req in ('kvm', ):
requirements[req] = str(True)
continue
if ":" in req:
req_name, req_value = req.split(":", 1)
- if req_name in list_requirements:
- requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
- else:
- if req_name in requirements:
- if req_value in ["0"]:
- warnings.append("Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
- del requirements[req_name]
- elif requirements[req_name] != req_value:
- warnings.append("Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
- requirements[req_name] = req_value
+ if req_name in list_requirements:
+ requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
+ else:
+ if req_name in requirements:
+ if req_value in ["0"]:
+ warnings.append("Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
+ del requirements[req_name]
+ elif requirements[req_name] != req_value:
+ warnings.append("Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
+ requirements[req_name] = req_value
else:
- requirements[req_name] = req_value
+ requirements[req_name] = req_value
else:
errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))
- if not errors:
- for req_name, req_value in requirements.items():
+ if not errors:
+ for req_name, req_value in requirements.items():
error_msg = validate_requirement(req_name, req_value, size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run)
- if error_msg:
- errors += [error_msg]
-
+ if error_msg:
+ errors += [error_msg]
+
invalid_requirements_for_distbuild = [requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')]
- sb_tags = [tag for tag in tags if tag.startswith('sb:')]
+ sb_tags = [tag for tag in tags if tag.startswith('sb:')]
if is_fat:
- if size != consts.TestSize.Large:
- errors.append("Only LARGE test may have ya:fat tag")
-
+ if size != consts.TestSize.Large:
+ errors.append("Only LARGE test may have ya:fat tag")
+
if in_autocheck and not is_force_sandbox:
- if invalid_requirements_for_distbuild:
- errors.append("'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(invalid_requirements_for_distbuild))
- if sb_tags:
- errors.append("You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(sb_tags))
- if 'ya:sandbox_coverage' in tags:
- errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
- else:
+ if invalid_requirements_for_distbuild:
+ errors.append("'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(invalid_requirements_for_distbuild))
+ if sb_tags:
+ errors.append("You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(sb_tags))
+ if 'ya:sandbox_coverage' in tags:
+ errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
+ else:
if is_force_sandbox:
- errors.append('ya:force_sandbox can be used with LARGE tests only')
- if 'ya:nofuse' in tags:
- errors.append('ya:nofuse can be used with LARGE tests only')
- if 'ya:privileged' in tags:
- errors.append("ya:privileged can be used with LARGE tests only")
- if in_autocheck and size == consts.TestSize.Large:
- errors.append("LARGE test must have ya:fat tag")
+ errors.append('ya:force_sandbox can be used with LARGE tests only')
+ if 'ya:nofuse' in tags:
+ errors.append('ya:nofuse can be used with LARGE tests only')
+ if 'ya:privileged' in tags:
+ errors.append("ya:privileged can be used with LARGE tests only")
+ if in_autocheck and size == consts.TestSize.Large:
+ errors.append("LARGE test must have ya:fat tag")
if 'ya:privileged' in tags and 'container' not in requirements:
errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")
@@ -236,34 +236,34 @@ def validate_test(unit, kw):
valid_kw['REQUIREMENTS'] = serialize_list(requiremtens_list)
if valid_kw.get("FUZZ-OPTS"):
- for option in get_list("FUZZ-OPTS"):
- if not option.startswith("-"):
- errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(option))
- break
- eqpos = option.find("=")
- if eqpos == -1 or len(option) == eqpos + 1:
- errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(option))
- break
- if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
- errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
- break
- if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
- errors.append("You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(option))
- break
-
- if valid_kw.get("YT-SPEC"):
+ for option in get_list("FUZZ-OPTS"):
+ if not option.startswith("-"):
+ errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(option))
+ break
+ eqpos = option.find("=")
+ if eqpos == -1 or len(option) == eqpos + 1:
+ errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(option))
+ break
+ if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
+ errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
+ break
+ if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
+ errors.append("You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(option))
+ break
+
+ if valid_kw.get("YT-SPEC"):
if not is_ytexec_run:
- errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
- else:
- for filename in get_list("YT-SPEC"):
- filename = unit.resolve('$S/' + filename)
- if not os.path.exists(filename):
- errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
- continue
+ errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
+ else:
+ for filename in get_list("YT-SPEC"):
+ filename = unit.resolve('$S/' + filename)
+ if not os.path.exists(filename):
+ errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
+ continue
if is_yt_spec_contain_pool_info(filename) and "ya:external" not in tags:
tags.append("ya:external")
tags.append("ya:yt_research_pool")
-
+
if valid_kw.get("USE_ARCADIA_PYTHON") == "yes" and valid_kw.get("SCRIPT-REL-PATH") == "py.test":
errors.append("PYTEST_SCRIPT is deprecated")
@@ -274,23 +274,23 @@ def validate_test(unit, kw):
if valid_kw.get('SPLIT-FACTOR'):
if valid_kw.get('FORK-MODE') == 'none':
errors.append('SPLIT_FACTOR must be use with FORK_TESTS() or FORK_SUBTESTS() macro')
-
- value = 1
+
+ value = 1
try:
value = int(valid_kw.get('SPLIT-FACTOR'))
if value <= 0:
raise ValueError("must be > 0")
- if value > SPLIT_FACTOR_MAX_VALUE:
- raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
+ if value > SPLIT_FACTOR_MAX_VALUE:
+ raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
except ValueError as e:
errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))
- if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
- nfiles = count_entries(valid_kw.get('TEST-FILES'))
- if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
- errors.append('Too much chunks generated:{} (limit: {}). Remove FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
- nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value))
-
+ if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
+ nfiles = count_entries(valid_kw.get('TEST-FILES'))
+ if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
+ errors.append('Too much chunks generated:{} (limit: {}). Remove FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
+ nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value))
+
unit_path = get_norm_unit_path(unit)
if not is_fat and "ya:noretries" in tags and not is_ytexec_run \
and not unit_path.startswith("devtools/") \
@@ -300,25 +300,25 @@ def validate_test(unit, kw):
and not unit_path.startswith("yp/tests"):
errors.append("Only LARGE tests can have 'ya:noretries' tag")
- if errors:
- return None, warnings, errors
-
- return valid_kw, warnings, errors
-
-
-def get_norm_unit_path(unit, extra=None):
- path = _common.strip_roots(unit.path())
- if extra:
- return '{}/{}'.format(path, extra)
- return path
-
-
-def dump_test(unit, kw):
- valid_kw, warnings, errors = validate_test(unit, kw)
- for w in warnings:
- unit.message(['warn', w])
- for e in errors:
- ymake.report_configure_error(e)
+ if errors:
+ return None, warnings, errors
+
+ return valid_kw, warnings, errors
+
+
+def get_norm_unit_path(unit, extra=None):
+ path = _common.strip_roots(unit.path())
+ if extra:
+ return '{}/{}'.format(path, extra)
+ return path
+
+
+def dump_test(unit, kw):
+ valid_kw, warnings, errors = validate_test(unit, kw)
+ for w in warnings:
+ unit.message(['warn', w])
+ for e in errors:
+ ymake.report_configure_error(e)
if valid_kw is None:
return None
string_handler = StringIO.StringIO()
@@ -339,14 +339,14 @@ def deserialize_list(val):
return filter(None, val.replace('"', "").split(";"))
-def count_entries(x):
- # see (de)serialize_list
- assert x is None or isinstance(x, str), type(x)
- if not x:
- return 0
- return x.count(";") + 1
-
-
+def count_entries(x):
+ # see (de)serialize_list
+ assert x is None or isinstance(x, str), type(x)
+ if not x:
+ return 0
+ return x.count(";") + 1
+
+
def get_values_list(unit, key):
res = map(str.strip, (unit.get(key) or '').replace('$' + key, '').strip().split())
return [r for r in res if r and r not in ['""', "''"]]
@@ -357,31 +357,31 @@ def get_norm_paths(unit, key):
return [x.rstrip('\\/') for x in get_values_list(unit, key)]
-def get_unit_list_variable(unit, name):
- items = unit.get(name)
- if items:
- items = items.split(' ')
- assert items[0] == "${}".format(name), (items, name)
- return items[1:]
- return []
-
-
-def implies(a, b):
- return bool((not a) or b)
-
-
-def match_coverage_extractor_requirements(unit):
- # we shouldn't add test if
- return all([
- # tests are not requested
- unit.get("TESTS_REQUESTED") == "yes",
- # build doesn't imply clang coverage, which supports segment extraction from the binaries
- unit.get("CLANG_COVERAGE") == "yes",
- # contrib wasn't requested
- implies(get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"),
- ])
-
-
+def get_unit_list_variable(unit, name):
+ items = unit.get(name)
+ if items:
+ items = items.split(' ')
+ assert items[0] == "${}".format(name), (items, name)
+ return items[1:]
+ return []
+
+
+def implies(a, b):
+ return bool((not a) or b)
+
+
+def match_coverage_extractor_requirements(unit):
+ # we shouldn't add test if
+ return all([
+ # tests are not requested
+ unit.get("TESTS_REQUESTED") == "yes",
+ # build doesn't imply clang coverage, which supports segment extraction from the binaries
+ unit.get("CLANG_COVERAGE") == "yes",
+ # contrib wasn't requested
+ implies(get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"),
+ ])
+
+
def get_tidy_config_map(unit):
global tidy_config_map
if tidy_config_map is None:
@@ -420,16 +420,16 @@ def onadd_ytest(unit, *args):
test_data = sorted(_common.filter_out_by_keyword(spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'))
- if flat_args[1] == "fuzz.test":
- unit.ondata("arcadia/fuzzing/{}/corpus.json".format(get_norm_unit_path(unit)))
+ if flat_args[1] == "fuzz.test":
+ unit.ondata("arcadia/fuzzing/{}/corpus.json".format(get_norm_unit_path(unit)))
elif flat_args[1] == "go.test":
data, _ = get_canonical_test_resources(unit)
test_data += data
- elif flat_args[1] == "coverage.extractor" and not match_coverage_extractor_requirements(unit):
- # XXX
- # Current ymake implementation doesn't allow to call macro inside the 'when' body
- # that's why we add ADD_YTEST(coverage.extractor) to every PROGRAM entry and check requirements later
- return
+ elif flat_args[1] == "coverage.extractor" and not match_coverage_extractor_requirements(unit):
+ # XXX
+ # Current ymake implementation doesn't allow to call macro inside the 'when' body
+ # that's why we add ADD_YTEST(coverage.extractor) to every PROGRAM entry and check requirements later
+ return
elif flat_args[1] == "clang_tidy" and unit.get("TIDY") != "yes":
# Graph is not prepared
return
@@ -439,7 +439,7 @@ def onadd_ytest(unit, *args):
test_tags = serialize_list(_get_test_tags(unit, spec_args))
test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
+
if flat_args[1] != "clang_tidy" and unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
if flat_args[1] in ("unittest.py", "gunittest", "g_benchmark"):
@@ -471,17 +471,17 @@ def onadd_ytest(unit, *args):
fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
fork_mode = ' '.join(fork_mode) if fork_mode else ''
- unit_path = get_norm_unit_path(unit)
-
+ unit_path = get_norm_unit_path(unit)
+
test_record = {
'TEST-NAME': flat_args[0],
'SCRIPT-REL-PATH': flat_args[1],
'TESTED-PROJECT-NAME': unit.name(),
'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
+ 'SOURCE-FOLDER-PATH': unit_path,
+ # TODO get rid of BUILD-FOLDER-PATH
+ 'BUILD-FOLDER-PATH': unit_path,
+ 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
'GLOBAL-LIBRARY-PATH': unit.global_filename(),
'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
@@ -495,9 +495,9 @@ def onadd_ytest(unit, *args):
'TAG': test_tags,
'REQUIREMENTS': serialize_list(test_requirements),
'TEST-CWD': unit.get('TEST_CWD_VALUE') or '',
- 'FUZZ-DICTS': serialize_list(spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE')),
- 'FUZZ-OPTS': serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE')),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
+ 'FUZZ-DICTS': serialize_list(spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE')),
+ 'FUZZ-OPTS': serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE')),
+ 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
'BLOB': unit.get('TEST_BLOB_DATA') or '',
'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE') or '',
@@ -513,12 +513,12 @@ def onadd_ytest(unit, *args):
else:
test_record["TEST-NAME"] += "_bench"
- if flat_args[1] == 'fuzz.test' and unit.get('FUZZING') == 'yes':
- test_record['FUZZING'] = '1'
+ if flat_args[1] == 'fuzz.test' and unit.get('FUZZING') == 'yes':
+ test_record['FUZZING'] = '1'
# use all cores if fuzzing requested
test_record['REQUIREMENTS'] = serialize_list(filter(None, deserialize_list(test_record['REQUIREMENTS']) + ["cpu:all", "ram:all"]))
-
- data = dump_test(unit, test_record)
+
+ data = dump_test(unit, test_record)
if data:
unit.set_property(["DART_DATA", data])
save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
@@ -550,7 +550,7 @@ def onadd_check(unit, *args):
flat_args, spec_args = _common.sort_by_keywords({"DEPENDS": -1, "TIMEOUT": 1, "DATA": -1, "TAG": -1, "REQUIREMENTS": -1, "FORK_MODE": 1,
"SPLIT_FACTOR": 1, "FORK_SUBTESTS": 0, "FORK_TESTS": 0, "SIZE": 1}, args)
check_type = flat_args[0]
- test_dir = get_norm_unit_path(unit)
+ test_dir = get_norm_unit_path(unit)
test_timeout = ''
fork_mode = ''
@@ -558,7 +558,7 @@ def onadd_check(unit, *args):
extra_test_dart_data = {}
ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
- if check_type in ["flake8.py2", "flake8.py3"]:
+ if check_type in ["flake8.py2", "flake8.py3"]:
script_rel_path = check_type
fork_mode = unit.get('TEST_FORK_MODE') or ''
elif check_type == "JAVA_STYLE":
@@ -628,18 +628,18 @@ def onadd_check(unit, *args):
'FORK-TEST-FILES': '',
'SIZE': 'SMALL',
'TAG': '',
- 'REQUIREMENTS': '',
+ 'REQUIREMENTS': '',
'USE_ARCADIA_PYTHON': use_arcadia_python or '',
'OLD_PYTEST': 'no',
'PYTHON-PATHS': '',
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': test_files,
- 'TEST-FILES': test_files,
+ # TODO remove FILES, see DEVTOOLS-7052
+ 'FILES': test_files,
+ 'TEST-FILES': test_files,
'NO_JBUILD': 'yes' if ymake_java_test else 'no',
}
test_record.update(extra_test_dart_data)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, test_record)
if data:
unit.set_property(["DART_DATA", data])
save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
@@ -659,10 +659,10 @@ def onadd_check_py_imports(unit, *args):
return
unit.onpeerdir(['library/python/testing/import_test'])
check_type = "py.imports"
- test_dir = get_norm_unit_path(unit)
+ test_dir = get_norm_unit_path(unit)
use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
- test_files = serialize_list([get_norm_unit_path(unit, unit.filename())])
+ test_files = serialize_list([get_norm_unit_path(unit, unit.filename())])
test_record = {
'TEST-NAME': "pyimports",
'TEST-TIMEOUT': '',
@@ -681,15 +681,15 @@ def onadd_check_py_imports(unit, *args):
'USE_ARCADIA_PYTHON': use_arcadia_python or '',
'OLD_PYTEST': 'no',
'PYTHON-PATHS': '',
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': test_files,
- 'TEST-FILES': test_files,
+ # TODO remove FILES, see DEVTOOLS-7052
+ 'FILES': test_files,
+ 'TEST-FILES': test_files,
}
if unit.get('NO_CHECK_IMPORTS_FOR_VALUE') != "None":
test_record["NO-CHECK"] = serialize_list(get_values_list(unit, 'NO_CHECK_IMPORTS_FOR_VALUE') or ["*"])
else:
test_record["NO-CHECK"] = ''
- data = dump_test(unit, test_record)
+ data = dump_test(unit, test_record)
if data:
unit.set_property(["DART_DATA", data])
save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
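The NO-CHECK field set just above controls which imports the py.imports check ignores. A small sketch of that branch, under the assumption that an empty NO_CHECK_IMPORTS() call falls back to the '*' wildcard (the check is skipped for every module) while the default literal string "None" keeps the check fully enabled; the module patterns are made up.

    # Sketch of the NO-CHECK derivation above.
    def no_check_patterns(raw_value, patterns):
        if raw_value != 'None':
            return patterns or ['*']   # empty NO_CHECK_IMPORTS() -> skip everything (assumed)
        return []                      # default 'None' -> check every import

    print(no_check_patterns('None', []))                     # []
    print(no_check_patterns('', []))                         # ['*']
    print(no_check_patterns('', ['foo.bar.*', 'baz.qux']))   # explicit skip patterns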
@@ -714,14 +714,14 @@ def onadd_pytest_script(unit, *args):
test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
tags = _get_test_tags(unit)
- requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- data, data_files = get_canonical_test_resources(unit)
+ data, data_files = get_canonical_test_resources(unit)
test_data += data
python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
binary_path = None
test_cwd = unit.get('TEST_CWD_VALUE') or ''
- _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, data_files=data_files)
+ _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, data_files=data_files)
def onadd_pytest_bin(unit, *args):
@@ -756,40 +756,40 @@ def add_test_to_dart(unit, test_type, binary_path=None, runner_bin=None):
test_size = unit.get('TEST_SIZE_NAME') or ''
test_cwd = unit.get('TEST_CWD_VALUE') or ''
- unit_path = unit.path()
+ unit_path = unit.path()
test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
tags = _get_test_tags(unit)
- requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- data, data_files = get_canonical_test_resources(unit)
+ data, data_files = get_canonical_test_resources(unit)
test_data += data
python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
- yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
+ yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
if not binary_path:
- binary_path = os.path.join(unit_path, unit.filename())
- _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, runner_bin=runner_bin, yt_spec=yt_spec, data_files=data_files)
+ binary_path = os.path.join(unit_path, unit.filename())
+ _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, runner_bin=runner_bin, yt_spec=yt_spec, data_files=data_files)
def extract_java_system_properties(unit, args):
if len(args) % 2:
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())
- props = []
+ props = []
for x, y in zip(args[::2], args[1::2]):
if x == 'FILE':
if y.startswith('${BINDIR}') or y.startswith('${ARCADIA_BUILD_ROOT}') or y.startswith('/'):
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), y)
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), y)
y = _common.rootrel_arc_src(y, unit)
if not os.path.exists(unit.resolve('$S/' + y)):
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: can\'t resolve {}'.format(unit.path(), y)
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: can\'t resolve {}'.format(unit.path(), y)
y = '${ARCADIA_ROOT}/' + y
props.append({'type': 'file', 'path': y})
else:
props.append({'type': 'inline', 'key': x, 'value': y})
- return props, None
+ return props, None
def onjava_test(unit, *args):
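extract_java_system_properties above pairs SYSTEM_PROPERTIES arguments two by two: a FILE keyword turns the next argument into a source-root-relative file property, anything else becomes an inline key/value pair. The runnable sketch below mirrors only that pairing; the root and filesystem validation is omitted and the sample path is hypothetical.

    # Pairing logic of extract_java_system_properties, without the path checks.
    def pair_system_properties(args):
        props = []
        for x, y in zip(args[::2], args[1::2]):
            if x == 'FILE':
                props.append({'type': 'file', 'path': '${ARCADIA_ROOT}/' + y})
            else:
                props.append({'type': 'inline', 'key': x, 'value': y})
        return props

    print(pair_system_properties([
        'log4j.configuration', 'file:log4j.properties',
        'FILE', 'devtools/dummy/system.properties',
    ]))
    # [{'type': 'inline', 'key': 'log4j.configuration', 'value': 'file:log4j.properties'},
    #  {'type': 'file', 'path': '${ARCADIA_ROOT}/devtools/dummy/system.properties'}]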
@@ -801,28 +801,28 @@ def onjava_test(unit, *args):
if unit.get('MODULE_TYPE') == 'JTEST_FOR':
if not unit.get('UNITTEST_DIR'):
- ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
- return
+ ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
+ return
java_cp_arg_type = unit.get('JAVA_CLASSPATH_CMD_TYPE_VALUE') or 'MANIFEST'
if java_cp_arg_type not in ('MANIFEST', 'COMMAND_FILE', 'LIST'):
ymake.report_configure_error('{}: TEST_JAVA_CLASSPATH_CMD_TYPE({}) are invalid. Choose argument from MANIFEST, COMMAND_FILE or LIST)'.format(unit.path(), java_cp_arg_type))
return
- unit_path = unit.path()
- path = _common.strip_roots(unit_path)
+ unit_path = unit.path()
+ path = _common.strip_roots(unit_path)
test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- test_data.append('arcadia/build/scripts/run_junit.py')
+ test_data.append('arcadia/build/scripts/run_junit.py')
test_data.append('arcadia/build/scripts/unpacking_jtest_runner.py')
- data, data_files = get_canonical_test_resources(unit)
- test_data += data
-
- props, error_mgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
- if error_mgs:
- ymake.report_configure_error(error_mgs)
- return
+ data, data_files = get_canonical_test_resources(unit)
+ test_data += data
+
+ props, error_mgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
+ if error_mgs:
+ ymake.report_configure_error(error_mgs)
+ return
for prop in props:
if prop['type'] == 'file':
test_data.append(prop['path'].replace('${ARCADIA_ROOT}', 'arcadia'))
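File-type system properties are also exposed through TEST-DATA with the source-root placeholder rewritten to the runtime 'arcadia' prefix, as the append above shows; for a hypothetical path:

    print('${ARCADIA_ROOT}/devtools/dummy/system.properties'.replace('${ARCADIA_ROOT}', 'arcadia'))
    # arcadia/devtools/dummy/system.properties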
@@ -831,7 +831,7 @@ def onjava_test(unit, *args):
test_cwd = unit.get('TEST_CWD_VALUE') or '' # TODO: validate test_cwd value
- if unit.get('MODULE_TYPE') == 'JUNIT5':
+ if unit.get('MODULE_TYPE') == 'JUNIT5':
script_rel_path = 'junit5.test'
else:
script_rel_path = 'junit.test'
@@ -842,7 +842,7 @@ def onjava_test(unit, *args):
'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path)]),
'SCRIPT-REL-PATH': script_rel_path,
'TEST-TIMEOUT': unit.get('TEST_TIMEOUT') or '',
- 'TESTED-PROJECT-NAME': path,
+ 'TESTED-PROJECT-NAME': path,
'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
# 'TEST-PRESERVE-ENV': 'da',
'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
@@ -878,7 +878,7 @@ def onjava_test(unit, *args):
else:
test_record['TEST_JAR'] = '{}/{}.jar'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))
- data = dump_test(unit, test_record)
+ data = dump_test(unit, test_record)
if data:
unit.set_property(['DART_DATA', data])
@@ -892,7 +892,7 @@ def onjava_test_deps(unit, *args):
assert len(args) == 1
mode = args[0]
- path = get_norm_unit_path(unit)
+ path = get_norm_unit_path(unit)
ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
test_record = {
@@ -923,7 +923,7 @@ def onjava_test_deps(unit, *args):
if ymake_java_test:
test_record['CLASSPATH'] = '$B/{}/{}.jar ${{DART_CLASSPATH}}'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))
- data = dump_test(unit, test_record)
+ data = dump_test(unit, test_record)
unit.set_property(['DART_DATA', data])
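The CLASSPATH format string above uses doubled braces so that str.format leaves a literal ${DART_CLASSPATH} placeholder for later substitution; only MODDIR and REALPRJNAME are interpolated. With hypothetical values:

    print('$B/{}/{}.jar ${{DART_CLASSPATH}}'.format('devtools/dummy', 'dummy'))
    # $B/devtools/dummy/dummy.jar ${DART_CLASSPATH}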
@@ -951,12 +951,12 @@ def _dump_test(
fork_mode,
test_size,
tags,
- requirements,
+ requirements,
binary_path='',
old_pytest=False,
test_cwd=None,
- runner_bin=None,
- yt_spec=None,
+ runner_bin=None,
+ yt_spec=None,
data_files=None
):
@@ -965,7 +965,7 @@ def _dump_test(
else:
script_rel_path = test_type
- unit_path = unit.path()
+ unit_path = unit.path()
fork_test_files = unit.get('FORK_TEST_FILES_MODE')
fork_mode = ' '.join(fork_mode) if fork_mode else ''
use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
@@ -1029,11 +1029,11 @@ def onrun(unit, *args):
def onsetup_exectest(unit, *args):
- command = unit.get(["EXECTEST_COMMAND_VALUE"])
- if command is None:
- ymake.report_configure_error("EXECTEST must have at least one RUN macro")
- return
- command = command.replace("$EXECTEST_COMMAND_VALUE", "")
+ command = unit.get(["EXECTEST_COMMAND_VALUE"])
+ if command is None:
+ ymake.report_configure_error("EXECTEST must have at least one RUN macro")
+ return
+ command = command.replace("$EXECTEST_COMMAND_VALUE", "")
if "PYTHON_BIN" in command:
unit.ondepends('contrib/tools/python')
unit.set(["TEST_BLOB_DATA", base64.b64encode(command)])
@@ -1043,71 +1043,71 @@ def onsetup_exectest(unit, *args):
def onsetup_run_python(unit):
if unit.get("USE_ARCADIA_PYTHON") == "yes":
unit.ondepends('contrib/tools/python')
-
-
-def get_canonical_test_resources(unit):
- unit_path = unit.path()
+
+
+def get_canonical_test_resources(unit):
+ unit_path = unit.path()
canon_data_dir = os.path.join(unit.resolve(unit_path), CANON_DATA_DIR_NAME, unit.get('CANONIZE_SUB_PATH') or '')
-
- try:
- _, dirs, files = next(os.walk(canon_data_dir))
- except StopIteration:
- # path doesn't exist
- return [], []
-
- if CANON_RESULT_FILE_NAME in files:
- return _get_canonical_data_resources_v2(os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME), unit_path)
- return [], []
-
-
-def _load_canonical_file(filename, unit_path):
- try:
- with open(filename) as results_file:
- return json.load(results_file)
- except Exception as e:
- print>>sys.stderr, "malformed canonical data in {}: {} ({})".format(unit_path, e, filename)
- return {}
-
-
-def _get_resource_from_uri(uri):
- m = CANON_MDS_RESOURCE_REGEX.match(uri)
- if m:
- res_id = m.group(1)
- return "{}:{}".format(MDS_SHEME, res_id)
-
- m = CANON_SBR_RESOURCE_REGEX.match(uri)
- if m:
- # There might be conflict between resources, because all resources in sandbox have 'resource.tar.gz' name
- # That's why we use notation with '=' to specify specific path for resource
- uri = m.group(1)
- res_id = m.group(2)
- return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
-
-
-def _get_external_resources_from_canon_data(data):
- # Method should work with both canonization versions:
- # result.json: {'uri':X 'checksum':Y}
- # result.json: {'testname': {'uri':X 'checksum':Y}}
- # result.json: {'testname': [{'uri':X 'checksum':Y}]}
- # Also there is a bug - if user returns {'uri': 1} from test - machinery will fail
- # That's why we check 'uri' and 'checksum' fields presence
- # (it's still a bug - user can return {'uri':X, 'checksum': Y}, we need to unify canonization format)
- res = set()
-
- if isinstance(data, dict):
- if 'uri' in data and 'checksum' in data:
- resource = _get_resource_from_uri(data['uri'])
- if resource:
- res.add(resource)
- else:
- for k, v in data.iteritems():
- res.update(_get_external_resources_from_canon_data(v))
- elif isinstance(data, list):
- for e in data:
- res.update(_get_external_resources_from_canon_data(e))
-
- return res
-
-
-def _get_canonical_data_resources_v2(filename, unit_path):
+
+ try:
+ _, dirs, files = next(os.walk(canon_data_dir))
+ except StopIteration:
+ # path doesn't exist
+ return [], []
+
+ if CANON_RESULT_FILE_NAME in files:
+ return _get_canonical_data_resources_v2(os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME), unit_path)
+ return [], []
+
+
+def _load_canonical_file(filename, unit_path):
+ try:
+ with open(filename) as results_file:
+ return json.load(results_file)
+ except Exception as e:
+ print>>sys.stderr, "malformed canonical data in {}: {} ({})".format(unit_path, e, filename)
+ return {}
+
+
+def _get_resource_from_uri(uri):
+ m = CANON_MDS_RESOURCE_REGEX.match(uri)
+ if m:
+ res_id = m.group(1)
+ return "{}:{}".format(MDS_SHEME, res_id)
+
+ m = CANON_SBR_RESOURCE_REGEX.match(uri)
+ if m:
+ # There might be conflict between resources, because all resources in sandbox have 'resource.tar.gz' name
+ # That's why we use notation with '=' to specify specific path for resource
+ uri = m.group(1)
+ res_id = m.group(2)
+ return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
+
+
+def _get_external_resources_from_canon_data(data):
+ # Method should work with both canonization versions:
+ # result.json: {'uri':X 'checksum':Y}
+ # result.json: {'testname': {'uri':X 'checksum':Y}}
+ # result.json: {'testname': [{'uri':X 'checksum':Y}]}
+ # Also there is a bug - if user returns {'uri': 1} from test - machinery will fail
+ # That's why we check 'uri' and 'checksum' fields presence
+ # (it's still a bug - user can return {'uri':X, 'checksum': Y}, we need to unify canonization format)
+ res = set()
+
+ if isinstance(data, dict):
+ if 'uri' in data and 'checksum' in data:
+ resource = _get_resource_from_uri(data['uri'])
+ if resource:
+ res.add(resource)
+ else:
+ for k, v in data.iteritems():
+ res.update(_get_external_resources_from_canon_data(v))
+ elif isinstance(data, list):
+ for e in data:
+ res.update(_get_external_resources_from_canon_data(e))
+
+ return res
+
+
+def _get_canonical_data_resources_v2(filename, unit_path):
return (_get_external_resources_from_canon_data(_load_canonical_file(filename, unit_path)), [filename])
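The canonical-data helpers restored above accept all three result.json layouts listed in the comment. That traversal can be sketched on its own, with the URI-to-resource conversion stubbed out because CANON_MDS_RESOURCE_REGEX and CANON_SBR_RESOURCE_REGEX are defined outside this hunk; the sample URIs are made up.

    # Standalone sketch of the traversal in _get_external_resources_from_canon_data.
    def collect_resources(data, to_resource):
        res = set()
        if isinstance(data, dict):
            if 'uri' in data and 'checksum' in data:
                resource = to_resource(data['uri'])
                if resource:
                    res.add(resource)
            else:
                for v in data.values():  # ytest.py itself uses iteritems() (Python 2)
                    res.update(collect_resources(v, to_resource))
        elif isinstance(data, list):
            for e in data:
                res.update(collect_resources(e, to_resource))
        return res

    identity = lambda uri: uri
    print(collect_resources({'uri': 'sbr://1', 'checksum': 'x'}, identity))                # {'sbr://1'}
    print(collect_resources({'test_a': {'uri': 'sbr://2', 'checksum': 'y'}}, identity))    # {'sbr://2'}
    print(collect_resources({'test_b': [{'uri': 'sbr://3', 'checksum': 'z'}]}, identity))  # {'sbr://3'}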