author     alevitskii <alevitskii@yandex-team.com>  2024-07-03 10:17:11 +0300
committer  alevitskii <alevitskii@yandex-team.com>  2024-07-03 10:30:19 +0300
commit     5192bfbd54cb78b5af7441709b91992b082e427e (patch)
tree       6ff732f7ec48adbe4309aacaafe46bd00fd98305
parent     c6290a63882d8fb818d200c847e1c479e2c35dfc (diff)
download   ydb-5192bfbd54cb78b5af7441709b91992b082e427e.tar.gz
Refactor test plugins
Propagated the changes from https://a.yandex-team.ru/review/6177977/details to all test plugins 04c66243f05a11f00ccd67355170533a3316c85f
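
This commit introduces `_dart_fields.py`, which models every dart field as a small class: `KEY` names the field, and one or more `value*` classmethods compute a `{KEY: value}` entry (returning `None` drops the field from the record). `create_dart_record` then folds the chosen field callables into a single dict. A minimal sketch of that contract, reusing the real `create_dart_record` body with a hypothetical `FakeUnit` stub standing in for ymake's unit object:

    import operator
    from functools import reduce


    class FakeUnit:
        # Hypothetical stand-in for ymake's unit object, for illustration only.
        def get(self, name):
            return ''  # pretend every unit variable is unset


    class TestName:
        KEY = 'TEST-NAME'

        @classmethod
        def value(cls, unit, flat_args, spec_args):
            return {cls.KEY: flat_args[0]}


    class Fuzzing:
        KEY = 'FUZZING'

        @classmethod
        def value(cls, unit, flat_args, spec_args):
            # Returns None when fuzzing is off, so the field is simply omitted.
            if unit.get('FUZZING') == 'yes':
                return {cls.KEY: '1'}


    def create_dart_record(fields, *args):
        # Merge the non-empty dicts produced by each field, left to right.
        return reduce(operator.or_, (value for field in fields if (value := field(*args))), {})


    record = create_dart_record((TestName.value, Fuzzing.value), FakeUnit(), ('mytest',), {})
    assert record == {'TEST-NAME': 'mytest'}

Note that `operator.or_` on dicts requires Python 3.9+.
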
-rw-r--r--  build/plugins/_dart_fields.py  1234
-rw-r--r--  build/plugins/nots.py           465
-rw-r--r--  build/plugins/ya.make             1
-rw-r--r--  build/plugins/ytest.py         1598
4 files changed, 1945 insertions, 1353 deletions
diff --git a/build/plugins/_dart_fields.py b/build/plugins/_dart_fields.py
new file mode 100644
index 0000000000..a8d18954f8
--- /dev/null
+++ b/build/plugins/_dart_fields.py
@@ -0,0 +1,1234 @@
+import base64
+import functools
+import json
+import operator
+import os
+import re
+import shlex
+import six
+import sys
+from functools import reduce
+
+import _common
+import lib.test_const as consts
+import ymake
+
+
+CANON_RESULT_FILE_NAME = 'result.json'
+CANON_DATA_DIR_NAME = 'canondata'
+CANON_OUTPUT_STORAGE = 'canondata_storage'
+
+KTLINT_CURRENT_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint/.editorconfig"
+KTLINT_OLD_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint_old/.editorconfig"
+
+
+class DartValueError(ValueError):
+ pass
+
+
+def create_dart_record(fields, *args):
+ return reduce(operator.or_, (value for field in fields if (value := field(*args))), {})
+
+
+def with_fields(fields):
+ def inner(func):
+ @functools.wraps(func)
+ def innermost(*args, **kwargs):
+ func(fields, *args, **kwargs)
+
+ return innermost
+
+ return inner
+
+
+def serialize_list(lst):
+ lst = list(filter(None, lst))
+ return '\"' + ';'.join(lst) + '\"' if lst else ''
+
+
+def deserialize_list(val):
+ return list(filter(None, val.replace('"', "").split(";")))
+
+
+def get_unit_list_variable(unit, name):
+ items = unit.get(name)
+ if items:
+ items = items.split(' ')
+ assert items[0] == "${}".format(name), (items, name)
+ return items[1:]
+ return []
+
+
+def get_values_list(unit, key):
+ res = map(str.strip, (unit.get(key) or '').replace('$' + key, '').strip().split())
+ return [r for r in res if r and r not in ['""', "''"]]
+
+
+def _get_test_tags(unit, spec_args=None):
+ if spec_args is None:
+ spec_args = {}
+ tags = spec_args.get('TAG', []) + get_values_list(unit, 'TEST_TAGS_VALUE')
+ tags = set(tags)
+ if unit.get('EXPORT_SEM') == 'yes':
+ filter_only_tags = sorted(t for t in tags if ':' not in t)
+ unit.set(['FILTER_ONLY_TEST_TAGS', ' '.join(filter_only_tags)])
+ # DEVTOOLS-7571
+ if unit.get('SKIP_TEST_VALUE') and consts.YaTestTags.Fat in tags:
+ tags.add(consts.YaTestTags.NotAutocheck)
+
+ return tags
+
+
+def format_recipes(data: str | None) -> str:
+ if not data:
+ return ""
+
+ data = data.replace('"USE_RECIPE_DELIM"', "\n")
+ data = data.replace("$TEST_RECIPES_VALUE", "")
+ return data
+
+
+def prepare_recipes(data: str | None) -> bytes:
+ formatted = format_recipes(data)
+ return base64.b64encode(six.ensure_binary(formatted))
+
+
+def prepare_env(data):
+ data = data.replace("$TEST_ENV_VALUE", "")
+ return serialize_list(shlex.split(data))
+
+
+def get_norm_paths(unit, key):
+ # return paths without trailing (back)slash
+ return [x.rstrip('\\/').replace('${ARCADIA_ROOT}/', '') for x in get_values_list(unit, key)]
+
+
+def _load_canonical_file(filename, unit_path):
+ try:
+ with open(filename, 'rb') as results_file:
+ return json.load(results_file)
+ except Exception as e:
+ print("malformed canonical data in {}: {} ({})".format(unit_path, e, filename), file=sys.stderr)
+ return {}
+
+
+def _get_resource_from_uri(uri):
+ m = consts.CANON_MDS_RESOURCE_REGEX.match(uri)
+ if m:
+ key = m.group(1)
+ return "{}:{}".format(consts.MDS_SCHEME, key)
+
+ m = consts.CANON_BACKEND_RESOURCE_REGEX.match(uri)
+ if m:
+ key = m.group(1)
+ return "{}:{}".format(consts.MDS_SCHEME, key)
+
+ m = consts.CANON_SBR_RESOURCE_REGEX.match(uri)
+ if m:
+ # There might be a conflict between resources, because all sandbox resources are named 'resource.tar.gz'
+ # That's why we use the '=' notation to specify a concrete path for the resource
+ uri = m.group(1)
+ res_id = m.group(2)
+ return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
+
+
+def _get_external_resources_from_canon_data(data):
+ # This method should work with all canonization formats:
+ # result.json: {'uri':X 'checksum':Y}
+ # result.json: {'testname': {'uri':X 'checksum':Y}}
+ # result.json: {'testname': [{'uri':X 'checksum':Y}]}
+ # There is also a bug: if a user returns {'uri': 1} from a test, the machinery fails
+ # That's why we check for the presence of both the 'uri' and 'checksum' fields
+ # (it's still a bug: a user can return {'uri':X, 'checksum': Y}; we need to unify the canonization format)
+ res = set()
+
+ if isinstance(data, dict):
+ if 'uri' in data and 'checksum' in data:
+ resource = _get_resource_from_uri(data['uri'])
+ if resource:
+ res.add(resource)
+ else:
+ for k, v in six.iteritems(data):
+ res.update(_get_external_resources_from_canon_data(v))
+ elif isinstance(data, list):
+ for e in data:
+ res.update(_get_external_resources_from_canon_data(e))
+
+ return res
+
+
+def _get_canonical_data_resources_v2(filename, unit_path):
+ return (_get_external_resources_from_canon_data(_load_canonical_file(filename, unit_path)), [filename])
+
+
+def get_canonical_test_resources(unit):
+ unit_path = unit.path()
+ if unit.get("CUSTOM_CANONDATA_PATH"):
+ path_to_canondata = unit_path.replace("$S", unit.get("CUSTOM_CANONDATA_PATH"))
+ else:
+ path_to_canondata = unit.resolve(unit_path)
+ canon_data_dir = os.path.join(path_to_canondata, CANON_DATA_DIR_NAME, unit.get('CANONIZE_SUB_PATH') or '')
+ try:
+ _, dirs, files = next(os.walk(canon_data_dir))
+ except StopIteration:
+ # path doesn't exist
+ return [], []
+ if CANON_RESULT_FILE_NAME in files:
+ return _get_canonical_data_resources_v2(os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME), unit_path)
+ return [], []
+
+
+def java_srcdirs_to_data(unit, var):
+ extra_data = []
+ for srcdir in (unit.get(var) or '').replace('$' + var, '').split():
+ if srcdir == '.':
+ srcdir = unit.get('MODDIR')
+ if srcdir.startswith('${ARCADIA_ROOT}/') or srcdir.startswith('$ARCADIA_ROOT/'):
+ srcdir = srcdir.replace('${ARCADIA_ROOT}/', '$S/')
+ srcdir = srcdir.replace('$ARCADIA_ROOT/', '$S/')
+ if srcdir.startswith('${CURDIR}') or srcdir.startswith('$CURDIR'):
+ srcdir = srcdir.replace('${CURDIR}', os.path.join('$S', unit.get('MODDIR')))
+ srcdir = srcdir.replace('$CURDIR', os.path.join('$S', unit.get('MODDIR')))
+ srcdir = unit.resolve_arc_path(srcdir)
+ if not srcdir.startswith('$'):
+ srcdir = os.path.join('$S', unit.get('MODDIR'), srcdir)
+ if srcdir.startswith('$S'):
+ extra_data.append(srcdir.replace('$S', 'arcadia'))
+ return serialize_list(extra_data)
+
+
+def extract_java_system_properties(unit, args):
+ if len(args) % 2:
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())
+
+ props = []
+ for x, y in zip(args[::2], args[1::2]):
+ if x == 'FILE':
+ if y.startswith('${BINDIR}') or y.startswith('${ARCADIA_BUILD_ROOT}') or y.startswith('/'):
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), y)
+
+ y = _common.rootrel_arc_src(y, unit)
+ if not os.path.exists(unit.resolve('$S/' + y)):
+ return [], 'Wrong use of SYSTEM_PROPERTIES in {}: can\'t resolve {}'.format(unit.path(), y)
+
+ y = '${ARCADIA_ROOT}/' + y
+ props.append({'type': 'file', 'path': y})
+ else:
+ props.append({'type': 'inline', 'key': x, 'value': y})
+
+ return props, None
+
+
+def _create_erm_json(unit):
+ from lib.nots.erm_json_lite import ErmJsonLite
+
+ erm_packages_path = unit.get("ERM_PACKAGES_PATH")
+ path = unit.resolve(unit.resolve_arc_path(erm_packages_path))
+
+ return ErmJsonLite.load(path)
+
+
+def _resolve_module_files(unit, mod_dir, file_paths):
+ mod_dir_with_sep_len = len(mod_dir) + 1
+ resolved_files = []
+
+ for path in file_paths:
+ resolved = _common.rootrel_arc_src(path, unit)
+ if resolved.startswith(mod_dir):
+ resolved = resolved[mod_dir_with_sep_len:]
+ resolved_files.append(resolved)
+
+ return resolved_files
+
+
+def _create_pm(unit):
+ from lib.nots.package_manager import manager
+
+ sources_path = unit.path()
+ module_path = unit.get("MODDIR")
+ if unit.get("TS_TEST_FOR"):
+ sources_path = unit.get("TS_TEST_FOR_DIR")
+ module_path = unit.get("TS_TEST_FOR_PATH")
+
+ return manager(
+ sources_path=unit.resolve(sources_path),
+ build_root="$B",
+ build_path=unit.path().replace("$S", "$B", 1),
+ contribs_path=unit.get("NPM_CONTRIBS_PATH"),
+ nodejs_bin_path=None,
+ script_path=None,
+ module_path=module_path,
+ )
+
+
+def _resolve_config_path(unit, test_runner, rel_to):
+ config_path = unit.get("ESLINT_CONFIG_PATH") if test_runner == "eslint" else unit.get("TS_TEST_CONFIG_PATH")
+ arc_config_path = unit.resolve_arc_path(config_path)
+ abs_config_path = unit.resolve(arc_config_path)
+ if not abs_config_path:
+ raise Exception("{} config not found: {}".format(test_runner, config_path))
+
+ unit.onsrcs([arc_config_path])
+ abs_rel_to = unit.resolve(unit.resolve_arc_path(unit.get(rel_to)))
+ return os.path.relpath(abs_config_path, start=abs_rel_to)
+
+
+def _get_ts_test_data_dirs(unit):
+ return sorted(
+ set(
+ [
+ os.path.dirname(_common.rootrel_arc_src(p, unit))
+ for p in (get_values_list(unit, "_TS_TEST_DATA_VALUE") or [])
+ ]
+ )
+ )
+
+
+class AndroidApkTestActivity:
+ KEY = 'ANDROID_APK_TEST_ACTIVITY'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE')}
+
+
+class BenchmarkOpts:
+ KEY = 'BENCHMARK-OPTS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: serialize_list(get_unit_list_variable(unit, 'BENCHMARK_OPTS_VALUE'))}
+
+
+class BinaryPath:
+ KEY = 'BINARY-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ unit_path = _common.get_norm_unit_path(unit)
+ return {cls.KEY: "{}/{}".format(unit_path, unit.filename())}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ unit_path = unit.path()
+ binary_path = os.path.join(unit_path, unit.filename())
+ if binary_path:
+ return {cls.KEY: _common.strip_roots(binary_path)}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ value = _common.strip_roots(os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
+ return {cls.KEY: value}
+
+ # TODO replace with `value`
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ test_dir = _common.get_norm_unit_path(unit)
+ return {cls.KEY: os.path.join(test_dir, unit.filename())}
+
+
+class Blob:
+ KEY = 'BLOB'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_BLOB_DATA')}
+
+
+class BuildFolderPath:
+ KEY = 'BUILD-FOLDER-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: _common.get_norm_unit_path(unit)}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: _common.strip_roots(unit.path())}
+
+
+class CanonizeSubPath:
+ KEY = 'CANONIZE_SUB_PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('CANONIZE_SUB_PATH')}
+
+
+class Classpath:
+ KEY = 'CLASSPATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
+ if ymake_java_test:
+ value = '$B/{}/{}.jar ${{DART_CLASSPATH}}'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))
+ return {cls.KEY: value}
+
+
+class ConfigPath:
+ KEY = 'CONFIG-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_runner, rel_to = flat_args
+ return {cls.KEY: _resolve_config_path(unit, test_runner, rel_to=rel_to)}
+
+
+class CustomDependencies:
+ KEY = 'CUSTOM-DEPENDENCIES'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ custom_deps = ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE'))
+ return {cls.KEY: custom_deps}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: " ".join(spec_args.get('DEPENDS', []))}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
+ return {cls.KEY: " ".join(custom_deps)}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ deps = []
+ _, linter = flat_args
+ deps.append(os.path.dirname(linter))
+ deps += spec_args.get('DEPENDS', [])
+ return {cls.KEY: " ".join(deps)}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ deps = _create_pm(unit).get_peers_from_package_json()
+ recipes_lines = format_recipes(unit.get("TEST_RECIPES_VALUE")).strip().splitlines()
+ if recipes_lines:
+ deps = deps or []
+ deps.extend([os.path.dirname(r.strip().split(" ")[0]) for r in recipes_lines])
+
+ return {cls.KEY: " ".join(deps)}
+
+
+class EslintConfigPath:
+ KEY = 'ESLINT_CONFIG_PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_runner, rel_to = flat_args
+ return {cls.KEY: _resolve_config_path(unit, test_runner, rel_to=rel_to)}
+
+
+class ForkMode:
+ KEY = 'FORK-MODE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ fork_mode = []
+ if 'FORK_SUBTESTS' in spec_args:
+ fork_mode.append('subtests')
+ if 'FORK_TESTS' in spec_args:
+ fork_mode.append('tests')
+ fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
+ fork_mode = ' '.join(fork_mode) if fork_mode else ''
+ return {cls.KEY: fork_mode}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_FORK_MODE')}
+
+
+class ForkTestFiles:
+ KEY = 'FORK-TEST-FILES'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('FORK_TEST_FILES_MODE')}
+
+
+class FuzzDicts:
+ KEY = 'FUZZ-DICTS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = serialize_list(spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE'))
+ return {cls.KEY: value}
+
+
+class FuzzOpts:
+ KEY = 'FUZZ-OPTS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE'))
+ return {cls.KEY: value}
+
+
+class Fuzzing:
+ KEY = 'FUZZING'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ if unit.get('FUZZING') == 'yes':
+ return {cls.KEY: '1'}
+
+
+class GlobalLibraryPath:
+ KEY = 'GLOBAL-LIBRARY-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.global_filename()}
+
+
+class GoBenchTimeout:
+ KEY = 'GO_BENCH_TIMEOUT'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('GO_BENCH_TIMEOUT')}
+
+
+class IgnoreClasspathClash:
+ KEY = 'IGNORE_CLASSPATH_CLASH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = ' '.join(get_values_list(unit, 'JAVA_IGNORE_CLASSPATH_CLASH_VALUE'))
+ return {cls.KEY: value}
+
+
+class JavaClasspathCmdType:
+ KEY = 'JAVA_CLASSPATH_CMD_TYPE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ java_cp_arg_type = unit.get('JAVA_CLASSPATH_CMD_TYPE_VALUE') or 'MANIFEST'
+ if java_cp_arg_type not in ('MANIFEST', 'COMMAND_FILE', 'LIST'):
+ # TODO move error reporting out of field classes
+ ymake.report_configure_error(
+ '{}: TEST_JAVA_CLASSPATH_CMD_TYPE({}) is invalid. Choose an argument from MANIFEST, COMMAND_FILE or LIST'.format(
+ unit.path(), java_cp_arg_type
+ )
+ )
+ raise DartValueError
+ return {cls.KEY: java_cp_arg_type}
+
+
+class JdkForTests:
+ KEY = 'JDK_FOR_TESTS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = 'JDK' + (unit.get('JDK_VERSION') or unit.get('JDK_REAL_VERSION') or '_DEFAULT') + '_FOR_TESTS'
+ return {cls.KEY: value}
+
+
+class JdkLatestVersion:
+ KEY = 'JDK_LATEST_VERSION'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('JDK_LATEST_VERSION')}
+
+
+class JdkResource:
+ KEY = 'JDK_RESOURCE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = 'JDK' + (unit.get('JDK_VERSION') or unit.get('JDK_REAL_VERSION') or '_DEFAULT')
+ return {cls.KEY: value}
+
+
+class KtlintBaselineFile:
+ KEY = 'KTLINT_BASELINE_FILE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ if unit.get('_USE_KTLINT_OLD') != 'yes':
+ baseline_path_relative = unit.get('_KTLINT_BASELINE_FILE')
+ if baseline_path_relative:
+ return {cls.KEY: baseline_path_relative}
+
+
+class KtlintBinary:
+ KEY = 'KTLINT_BINARY'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = '$(KTLINT_OLD)/run.bat' if unit.get('_USE_KTLINT_OLD') == 'yes' else '$(KTLINT)/run.bat'
+ return {cls.KEY: value}
+
+
+class LintFileProcessingTime:
+ KEY = 'LINT-FILE-PROCESSING-TIME'
+
+
+class ModuleLang:
+ KEY = 'MODULE_LANG'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN}
+
+
+class ModuleType:
+ KEY = 'MODULE_TYPE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('MODULE_TYPE')}
+
+
+class NoCheck:
+ KEY = 'NO-CHECK'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ if unit.get('NO_CHECK_IMPORTS_FOR_VALUE') != "None":
+ value = serialize_list(get_values_list(unit, 'NO_CHECK_IMPORTS_FOR_VALUE') or ["*"])
+ return {cls.KEY: value}
+
+
+class NodejsRootVarName:
+ KEY = 'NODEJS-ROOT-VAR-NAME'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("NODEJS-ROOT-VAR-NAME")}
+
+
+class NodeModulesBundleFilename:
+ KEY = 'NODE-MODULES-BUNDLE-FILENAME'
+
+
+class OldPytest:
+ KEY = 'OLD_PYTEST'
+
+
+class PythonPaths:
+ KEY = 'PYTHON-PATHS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
+ return {cls.KEY: serialize_list(python_paths)}
+
+
+class Requirements:
+ KEY = 'REQUIREMENTS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ return {cls.KEY: serialize_list(test_requirements)}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ test_requirements = serialize_list(
+ spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ )
+ if unit.get('FUZZING') == 'yes':
+ value = serialize_list(filter(None, deserialize_list(test_requirements) + ["cpu:all", "ram:all"]))
+ return {cls.KEY: value}
+ else:
+ return {cls.KEY: test_requirements}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ value = " ".join(spec_args.get('REQUIREMENTS', []))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ return {cls.KEY: serialize_list(requirements)}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ requirements = sorted(set(["network:full"] + get_values_list(unit, "TEST_REQUIREMENTS_VALUE")))
+ return {cls.KEY: serialize_list(requirements)}
+
+
+class SbrUidExt:
+ KEY = 'SBR-UID-EXT'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ uid_ext = unit.get("SBR_UID_EXT").split(" ", 1)[-1] # strip variable name
+ return {cls.KEY: uid_ext}
+
+
+class ScriptRelPath:
+ KEY = 'SCRIPT-REL-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: flat_args[1]}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: flat_args[0]}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ return {cls.KEY: 'py3test.bin' if (unit.get("PYTHON3") == 'yes') else "pytest.bin"}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ return {cls.KEY: 'junit5.test' if unit.get('MODULE_TYPE') == 'JUNIT5' else 'junit.test'}
+
+
+class Size:
+ KEY = 'SIZE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_SIZE_NAME')}
+
+
+class SkipTest:
+ KEY = 'SKIP_TEST'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('SKIP_TEST_VALUE')}
+
+
+class SourceFolderPath:
+ KEY = 'SOURCE-FOLDER-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: _common.get_norm_unit_path(unit)}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ test_dir = _common.get_norm_unit_path(unit)
+ test_files = flat_args[1:]
+ if test_files:
+ test_dir = os.path.dirname(test_files[0]).removeprefix("$S/")
+ return {cls.KEY: test_dir}
+
+
+class SplitFactor:
+ KEY = 'SPLIT-FACTOR'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR')
+ return {cls.KEY: value}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_SPLIT_FACTOR')}
+
+
+class Tag:
+ KEY = 'TAG'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
+ return {cls.KEY: tags}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ tags = serialize_list(get_values_list(unit, "TEST_TAGS_VALUE"))
+ return {cls.KEY: tags}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ tags = sorted(set(["ya:fat", "ya:external", "ya:noretries"] + get_values_list(unit, "TEST_TAGS_VALUE")))
+ return {cls.KEY: serialize_list(tags)}
+
+
+class TestClasspath:
+ KEY = 'TEST_CLASSPATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
+ ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
+ if test_classpath_origins:
+ value = '${TEST_CLASSPATH_MANAGED}'
+ return {cls.KEY: value}
+ elif ymake_java_test:
+ value = '${DART_CLASSPATH}'
+ return {cls.KEY: value}
+
+
+class TestClasspathDeps:
+ KEY = 'TEST_CLASSPATH_DEPS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
+ ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
+ if not test_classpath_origins and ymake_java_test:
+ return {cls.KEY: '${DART_CLASSPATH_DEPS}'}
+
+
+class TestClasspathOrigins:
+ KEY = 'TEST_CLASSPATH_ORIGINS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
+ if test_classpath_origins:
+ return {cls.KEY: test_classpath_origins}
+
+
+class TestCwd:
+ KEY = 'TEST-CWD'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_cwd = unit.get('TEST_CWD_VALUE') # TODO: validate test_cwd value
+ return {cls.KEY: test_cwd}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ test_cwd = unit.get('TEST_CWD_VALUE') or ''
+ if test_cwd:
+ test_cwd = test_cwd.replace("$TEST_CWD_VALUE", "").replace('"MACRO_CALLS_DELIM"', "").strip()
+ return {cls.KEY: test_cwd}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("MODDIR")}
+
+
+class TestData:
+ KEY = 'TEST-DATA'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_data = sorted(
+ _common.filter_out_by_keyword(
+ spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
+ )
+ )
+ return {cls.KEY: serialize_list(test_data)}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ test_data = sorted(
+ _common.filter_out_by_keyword(
+ spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
+ )
+ )
+
+ data, _ = get_canonical_test_resources(unit)
+ test_data += data
+ value = serialize_list(sorted(test_data))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ if unit.get('_USE_KTLINT_OLD') == 'yes':
+ extra_test_data = serialize_list([KTLINT_OLD_EDITOR_CONFIG])
+ else:
+ data_list = [KTLINT_CURRENT_EDITOR_CONFIG]
+ baseline_path_relative = unit.get('_KTLINT_BASELINE_FILE')
+ if baseline_path_relative:
+ baseline_path = unit.resolve_arc_path(baseline_path_relative).replace('$S', 'arcadia')
+ data_list += [baseline_path]
+ extra_test_data = serialize_list(data_list)
+ return {cls.KEY: extra_test_data}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
+ if ymake_java_test:
+ return {cls.KEY: java_srcdirs_to_data(unit, 'ALL_SRCDIRS')}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
+ data, _ = get_canonical_test_resources(unit)
+ test_data += data
+ value = serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED')))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value6(cls, unit, flat_args, spec_args):
+ test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
+ test_data.append('arcadia/build/scripts/run_junit.py')
+ test_data.append('arcadia/build/scripts/unpacking_jtest_runner.py')
+
+ data, _ = get_canonical_test_resources(unit)
+ test_data += data
+
+ props, error_msgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
+ if error_msgs:
+ # TODO move error reporting out of field classes
+ ymake.report_configure_error(error_msgs)
+ raise DartValueError
+ for prop in props:
+ if prop['type'] == 'file':
+ test_data.append(prop['path'].replace('${ARCADIA_ROOT}', 'arcadia'))
+ value = serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED')))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value7(cls, unit, flat_args, spec_args):
+ return {cls.KEY: serialize_list(get_values_list(unit, "TEST_DATA_VALUE"))}
+
+
+class TsConfigPath:
+ KEY = 'TS_CONFIG_PATH'
+
+
+class TsTestDataDirs:
+ KEY = 'TS-TEST-DATA-DIRS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = serialize_list(_get_ts_test_data_dirs(unit))
+ return {cls.KEY: value}
+
+
+class TsTestDataDirsRename:
+ KEY = 'TS-TEST-DATA-DIRS-RENAME'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("_TS_TEST_DATA_DIRS_RENAME_VALUE")}
+
+
+class TsTestForPath:
+ KEY = 'TS-TEST-FOR-PATH'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("TS_TEST_FOR_PATH")}
+
+
+class TestedProjectFilename:
+ KEY = 'TESTED-PROJECT-FILENAME'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.filename()}
+
+
+class TestedProjectName:
+ KEY = 'TESTED-PROJECT-NAME'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.name()}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ test_dir = _common.get_norm_unit_path(unit)
+ return {cls.KEY: os.path.basename(test_dir)}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ test_dir = _common.get_norm_unit_path(unit)
+ test_files = flat_args[1:]
+ if test_files:
+ test_dir = os.path.dirname(test_files[0]).removeprefix("$S/")
+ return {cls.KEY: os.path.basename(test_dir)}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ binary_path = os.path.join(unit.path(), unit.filename())
+ return {cls.KEY: os.path.basename(binary_path)}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ return {cls.KEY: _common.get_norm_unit_path(unit)}
+
+ @classmethod
+ def value6(cls, unit, flat_args, spec_args):
+ value = os.path.basename(os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value7(cls, unit, flat_args, spec_args):
+ return {cls.KEY: os.path.splitext(unit.filename())[0]}
+
+
+class TestFiles:
+ KEY = 'TEST-FILES'
+ # TODO remove FILES, see DEVTOOLS-7052
+ KEY2 = 'FILES'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ data_re = re.compile(r"sbr:/?/?(\d+)=?.*")
+ data = flat_args[1:]
+ resources = []
+ for f in data:
+ matched = re.match(data_re, f)
+ if matched:
+ resources.append(matched.group(1))
+ value = serialize_list(resources)
+ return {cls.KEY: value, cls.KEY2: value}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ value = serialize_list(flat_args[1:])
+ return {cls.KEY: value, cls.KEY2: value}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ test_files = flat_args[1:]
+ check_level = flat_args[1]
+ allowed_levels = {
+ 'base': '/yandex_checks.xml',
+ 'strict': '/yandex_checks_strict.xml',
+ 'extended': '/yandex_checks_extended.xml',
+ 'library': '/yandex_checks_library.xml',
+ }
+ if check_level not in allowed_levels:
+ raise Exception("'{}' is not allowed in LINT(), use one of {}".format(check_level, allowed_levels.keys()))
+ test_files[0] = allowed_levels[check_level]
+ value = serialize_list(test_files)
+ return {cls.KEY: value, cls.KEY2: value}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ value = serialize_list([_common.get_norm_unit_path(unit, unit.filename())])
+ return {cls.KEY: value, cls.KEY2: value}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
+ return {cls.KEY: serialize_list(test_files)}
+
+ @classmethod
+ def value6(cls, unit, flat_args, spec_args):
+ test_files = get_values_list(unit, "_TS_TEST_SRCS_VALUE")
+ test_files = _resolve_module_files(unit, unit.get("MODDIR"), test_files)
+ return {cls.KEY: serialize_list(test_files)}
+
+ @classmethod
+ def value7(cls, unit, flat_args, spec_args):
+ typecheck_files = get_values_list(unit, "TS_INPUT_FILES")
+ test_files = [_common.resolve_common_const(f) for f in typecheck_files]
+ return {cls.KEY: serialize_list(test_files)}
+
+
+class TestEnv:
+ KEY = 'TEST-ENV'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: prepare_env(unit.get("TEST_ENV_VALUE"))}
+
+
+class TestIosDeviceType:
+ KEY = 'TEST_IOS_DEVICE_TYPE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_IOS_DEVICE_TYPE_VALUE')}
+
+
+class TestIosRuntimeType:
+ KEY = 'TEST_IOS_RUNTIME_TYPE'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_IOS_RUNTIME_TYPE_VALUE')}
+
+
+class TestJar:
+ KEY = 'TEST_JAR'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
+ ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
+ if not test_classpath_origins and ymake_java_test:
+ if unit.get('UNITTEST_DIR'):
+ value = '${UNITTEST_MOD}'
+ else:
+ value = '{}/{}.jar'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))
+ return {cls.KEY: value}
+
+
+class TestName:
+ KEY = 'TEST-NAME'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: flat_args[0]}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ return {cls.KEY: flat_args[0] + '_bench'}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ return {cls.KEY: flat_args[0].lower()}
+
+ @classmethod
+ def value4(cls, unit, flat_args, spec_args):
+ unit_path = unit.path()
+ binary_path = os.path.join(unit_path, unit.filename())
+ test_name = os.path.basename(binary_path)
+ return {cls.KEY: os.path.splitext(test_name)[0]}
+
+ @classmethod
+ def value5(cls, unit, flat_args, spec_args):
+ path = _common.get_norm_unit_path(unit)
+ value = '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path)])
+ return {cls.KEY: value}
+
+ @classmethod
+ def value6(cls, unit, flat_args, spec_args):
+ path = _common.get_norm_unit_path(unit)
+ value = '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path), 'dependencies']).strip('-')
+ return {cls.KEY: value}
+
+ @classmethod
+ def value7(cls, unit, flat_args, spec_args):
+ test_name = os.path.basename(os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
+ return {cls.KEY: os.path.splitext(test_name)[0]}
+
+
+class TestPartition:
+ KEY = 'TEST_PARTITION'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get("TEST_PARTITION")}
+
+
+class TestRecipes:
+ KEY = 'TEST-RECIPES'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: prepare_recipes(unit.get("TEST_RECIPES_VALUE"))}
+
+
+class TestRunnerBin:
+ KEY = 'TEST-RUNNER-BIN'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ runner_bin = spec_args.get('RUNNER_BIN', [None])[0]
+ if runner_bin:
+ return {cls.KEY: runner_bin}
+
+
+class TestTimeout:
+ KEY = 'TEST-TIMEOUT'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
+ return {cls.KEY: test_timeout}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ timeout = list(filter(None, [unit.get(["TEST_TIMEOUT"])]))
+ if timeout:
+ timeout = timeout[0]
+ else:
+ timeout = '0'
+ return {cls.KEY: timeout}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('TEST_TIMEOUT')}
+
+
+class TsResources:
+ KEY = "{}-ROOT-VAR-NAME"
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ erm_json = _create_erm_json(unit)
+ ret = {}
+ for tool in erm_json.list_npm_packages():
+ tool_resource_label = cls.KEY.format(tool.upper())
+ tool_resource_value = unit.get(tool_resource_label)
+ if tool_resource_value:
+ ret[tool_resource_label] = tool_resource_value
+ return ret
+
+
+class JvmArgs:
+ KEY = 'JVM_ARGS'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = serialize_list(get_values_list(unit, 'JVM_ARGS_VALUE'))
+ return {cls.KEY: value}
+
+
+class StrictClasspathClash:
+ KEY = 'STRICT_CLASSPATH_CLASH'
+
+
+class SystemProperties:
+ KEY = 'SYSTEM_PROPERTIES'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ props, error_msgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
+ if error_msgs:
+ # TODO move error reporting out of field classes
+ ymake.report_configure_error(error_msgs)
+ raise DartValueError
+
+ props = base64.b64encode(six.ensure_binary(json.dumps(props)))
+ return {cls.KEY: props}
+
+
+class UnittestDir:
+ KEY = 'UNITTEST_DIR'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('UNITTEST_DIR')}
+
+
+class UseArcadiaPython:
+ KEY = 'USE_ARCADIA_PYTHON'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ return {cls.KEY: unit.get('USE_ARCADIA_PYTHON')}
+
+
+class UseKtlintOld:
+ KEY = 'USE_KTLINT_OLD'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ if unit.get('_USE_KTLINT_OLD') == 'yes':
+ return {cls.KEY: 'yes'}
+
+
+class YtSpec:
+ KEY = 'YT-SPEC'
+
+ @classmethod
+ def value(cls, unit, flat_args, spec_args):
+ value = serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
+ return {cls.KEY: value}
+
+ @classmethod
+ def value2(cls, unit, flat_args, spec_args):
+ yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
+ if yt_spec:
+ return {cls.KEY: serialize_list(yt_spec)}
+
+ @classmethod
+ def value3(cls, unit, flat_args, spec_args):
+ yt_spec_values = get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')
+ return {cls.KEY: serialize_list(yt_spec_values)}
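
The plugin changes that follow consume these helpers through the `with_fields` decorator defined above, which pins a tuple of field callables as the handler's first argument while call sites keep their old signatures. A compact sketch of that wiring, with a hypothetical `_add_demo_test` handler that is not part of this commit:

    import functools


    def with_fields(fields):
        # Same shape as the decorator in _dart_fields.py: prepend the field
        # tuple to the wrapped handler's arguments.
        def inner(func):
            @functools.wraps(func)
            def innermost(*args, **kwargs):
                func(fields, *args, **kwargs)

            return innermost

        return inner


    FIELDS = (lambda unit, flat_args, spec_args: {'TEST-NAME': flat_args[0]},)


    @with_fields(FIELDS)
    def _add_demo_test(fields, unit, flat_args, spec_args):
        record = {}
        for field in fields:
            if value := field(unit, flat_args, spec_args):
                record |= value
        print(record)


    _add_demo_test(None, ('mytest',), {})  # prints {'TEST-NAME': 'mytest'}
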
diff --git a/build/plugins/nots.py b/build/plugins/nots.py
index a011218f7d..8774c0d60b 100644
--- a/build/plugins/nots.py
+++ b/build/plugins/nots.py
@@ -1,8 +1,13 @@
import os
+import _dart_fields as df
import ymake
import ytest
-from _common import resolve_common_const, get_norm_unit_path, rootrel_arc_src, to_yesno
+from _dart_fields import (
+ create_dart_record,
+ _create_pm,
+)
+from _common import rootrel_arc_src, to_yesno
# 1 is 60 files per chunk for TIMEOUT(60) - default timeout for SIZE(SMALL)
@@ -11,6 +16,23 @@ from _common import resolve_common_const, get_norm_unit_path, rootrel_arc_src, t
ESLINT_FILE_PROCESSING_TIME_DEFAULT = 0.2 # seconds per file
+TS_TEST_FIELDS_BASE = (
+ df.BinaryPath.value4,
+ df.BuildFolderPath.value,
+ df.ForkMode.value2,
+ df.NodejsRootVarName.value,
+ df.ScriptRelPath.value2,
+ df.SourceFolderPath.value,
+ df.SplitFactor.value2,
+ df.TestData.value7,
+ df.TestedProjectName.value7,
+ df.TestEnv.value,
+ df.TestName.value,
+ df.TestRecipes.value,
+ df.TestTimeout.value3,
+)
+
+
class PluginLogger(object):
def __init__(self):
self.unit = None
@@ -92,26 +114,6 @@ def _build_cmd_input_paths(paths, hide=False, disable_include_processor=False):
return _build_directives("input", [hide_part, disable_ip_part], paths)
-def _create_pm(unit):
- from lib.nots.package_manager import manager
-
- sources_path = unit.path()
- module_path = unit.get("MODDIR")
- if unit.get("TS_TEST_FOR"):
- sources_path = unit.get("TS_TEST_FOR_DIR")
- module_path = unit.get("TS_TEST_FOR_PATH")
-
- return manager(
- sources_path=unit.resolve(sources_path),
- build_root="$B",
- build_path=unit.path().replace("$S", "$B", 1),
- contribs_path=unit.get("NPM_CONTRIBS_PATH"),
- nodejs_bin_path=None,
- script_path=None,
- module_path=module_path,
- )
-
-
def _create_erm_json(unit):
from lib.nots.erm_json_lite import ErmJsonLite
@@ -252,7 +254,7 @@ def on_ts_configure(unit):
_filter_inputs_by_rules_from_tsconfig(unit, tsconfig)
_setup_eslint(unit)
- _setup_tsc_typecheck(unit, tsconfig_paths)
+ _setup_tsc_typecheck(unit)
@_with_report_configure_error
@@ -309,117 +311,279 @@ def _filter_inputs_by_rules_from_tsconfig(unit, tsconfig):
__set_append(unit, "TS_INPUT_FILES", [os.path.join(target_path, f) for f in filtered_files])
-def _get_ts_test_data_dirs(unit):
- return sorted(
- set(
- [
- os.path.dirname(rootrel_arc_src(p, unit))
- for p in (ytest.get_values_list(unit, "_TS_TEST_DATA_VALUE") or [])
- ]
- )
+def _is_tests_enabled(unit):
+ if unit.get("TIDY") == "yes":
+ return False
+
+ return True
+
+
+@df.with_fields(
+ TS_TEST_FIELDS_BASE
+ + (
+ df.Size.value2,
+ df.Tag.value2,
+ df.Requirements.value4,
+ df.ConfigPath.value,
+ df.TsTestDataDirs.value,
+ df.TsTestDataDirsRename.value,
+ df.TsResources.value,
)
+)
+def _add_jest_ts_test(fields, unit, default_config, node_modules_filename):
+ if unit.enabled('TS_COVERAGE'):
+ unit.on_peerdir_ts_resource("nyc")
+ for_mod_path = df.TsTestForPath.value(unit, (), {})[df.TsTestForPath.KEY]
-def _resolve_config_path(unit, test_runner, rel_to):
- config_path = unit.get("ESLINT_CONFIG_PATH") if test_runner == "eslint" else unit.get("TS_TEST_CONFIG_PATH")
- arc_config_path = unit.resolve_arc_path(config_path)
- abs_config_path = unit.resolve(arc_config_path)
- if not abs_config_path:
- raise Exception("{} config not found: {}".format(test_runner, config_path))
+ # for_mod_path = unit.get("TS_TEST_FOR_PATH")
+ unit.onpeerdir([for_mod_path])
+ unit.on_setup_extract_node_modules_recipe([for_mod_path])
+ unit.on_setup_extract_output_tars_recipe([for_mod_path])
- unit.onsrcs([arc_config_path])
- abs_rel_to = unit.resolve(unit.resolve_arc_path(unit.get(rel_to)))
- return os.path.relpath(abs_config_path, start=abs_rel_to)
+ test_runner = 'jest'
+ unit.set(["TS_TEST_NM", os.path.join("$(BUILD_ROOT)", for_mod_path, node_modules_filename)])
-def _is_tests_enabled(unit):
- if unit.get("TIDY") == "yes":
- return False
+ config_path = unit.get("TS_TEST_CONFIG_PATH")
+ if not config_path:
+ config_path = os.path.join(for_mod_path, default_config)
+ unit.set(["TS_TEST_CONFIG_PATH", config_path])
- return True
+ test_files = df.TestFiles.value6(unit, (), {})[df.TestFiles.KEY]
+ if not test_files:
+ ymake.report_configure_error("No tests found")
+ return
+ from lib.nots.package_manager import constants
-def _get_test_runner_handlers():
- return {
- "jest": _add_jest_ts_test,
- "hermione": _add_hermione_ts_test,
- "playwright": _add_playwright_ts_test,
- }
+ def sort_uniq(text):
+ return sorted(set(text))
+ deps = df.CustomDependencies.value5(unit, (), {})[df.CustomDependencies.KEY].split()
-def _add_jest_ts_test(unit, test_runner, test_files, deps, test_record):
- test_record.update(
- {
- "CONFIG-PATH": _resolve_config_path(unit, test_runner, rel_to="TS_TEST_FOR_PATH"),
- }
- )
- _add_test(unit, test_runner, test_files, deps, test_record)
+ if deps:
+ joined_deps = "\n".join(deps)
+ logger.info(f"{test_runner} deps: \n{joined_deps}")
+ unit.ondepends(deps)
+
+ flat_args = (test_runner, "TS_TEST_FOR_PATH")
+ dart_record = create_dart_record(fields, unit, flat_args, {})
+ dart_record[df.TestFiles.KEY] = test_files
+ dart_record[df.NodeModulesBundleFilename.KEY] = constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME
-def _add_hermione_ts_test(unit, test_runner, test_files, deps, test_record):
- test_tags = sorted(set(["ya:fat", "ya:external", "ya:noretries"] + ytest.get_values_list(unit, "TEST_TAGS_VALUE")))
- test_requirements = sorted(set(["network:full"] + ytest.get_values_list(unit, "TEST_REQUIREMENTS_VALUE")))
+ extra_deps = df.CustomDependencies.value3(unit, (), {})[df.CustomDependencies.KEY].split()
+ dart_record[df.CustomDependencies.KEY] = " ".join(sort_uniq(deps + extra_deps))
+ dart_record[df.TsTestForPath.KEY] = for_mod_path
- test_record.update(
- {
- "SIZE": "LARGE",
- "TAG": ytest.serialize_list(test_tags),
- "REQUIREMENTS": ytest.serialize_list(test_requirements),
- "CONFIG-PATH": _resolve_config_path(unit, test_runner, rel_to="TS_TEST_FOR_PATH"),
- }
+ data = ytest.dump_test(unit, dart_record)
+ if data:
+ unit.set_property(["DART_DATA", data])
+
+
+@df.with_fields(
+ TS_TEST_FIELDS_BASE
+ + (
+ df.Tag.value3,
+ df.Requirements.value5,
+ df.ConfigPath.value,
+ df.TsTestDataDirs.value,
+ df.TsTestDataDirsRename.value,
+ df.TsResources.value,
)
+)
+def _add_hermione_ts_test(fields, unit, default_config, node_modules_filename):
+ if unit.enabled('TS_COVERAGE'):
+ unit.on_peerdir_ts_resource("nyc")
- _add_test(unit, test_runner, test_files, deps, test_record)
+ for_mod_path = df.TsTestForPath.value(unit, (), {})[df.TsTestForPath.KEY]
+ # for_mod_path = unit.get("TS_TEST_FOR_PATH")
+ unit.onpeerdir([for_mod_path])
+ unit.on_setup_extract_node_modules_recipe([for_mod_path])
+ unit.on_setup_extract_output_tars_recipe([for_mod_path])
+
+ test_runner = 'hermione'
+
+ unit.set(["TS_TEST_NM", os.path.join("$B", for_mod_path, node_modules_filename)])
+
+ config_path = unit.get("TS_TEST_CONFIG_PATH")
+ if not config_path:
+ config_path = os.path.join(for_mod_path, default_config)
+ unit.set(["TS_TEST_CONFIG_PATH", config_path])
+
+ test_files = df.TestFiles.value6(unit, (), {})[df.TestFiles.KEY]
+ if not test_files:
+ ymake.report_configure_error("No tests found")
+ return
+
+ from lib.nots.package_manager import constants
+
+ def sort_uniq(text):
+ return sorted(set(text))
+
+ deps = df.CustomDependencies.value5(unit, (), {})[df.CustomDependencies.KEY].split()
+
+ if deps:
+ joined_deps = "\n".join(deps)
+ logger.info(f"{test_runner} deps: \n{joined_deps}")
+ unit.ondepends(deps)
+ flat_args = (test_runner, "TS_TEST_FOR_PATH")
-def _add_playwright_ts_test(unit, test_runner, test_files, deps, test_record):
- test_record.update(
- {
- "CONFIG-PATH": _resolve_config_path(unit, test_runner, rel_to="TS_TEST_FOR_PATH"),
- }
+ dart_record = create_dart_record(fields, unit, flat_args, {})
+ dart_record[df.TestFiles.KEY] = test_files
+ dart_record[df.NodeModulesBundleFilename.KEY] = constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME
+
+ extra_deps = df.CustomDependencies.value3(unit, (), {})[df.CustomDependencies.KEY].split()
+ dart_record[df.CustomDependencies.KEY] = " ".join(sort_uniq(deps + extra_deps))
+ dart_record[df.TsTestForPath.KEY] = for_mod_path
+ dart_record[df.Size.KEY] = "LARGE"
+
+ data = ytest.dump_test(unit, dart_record)
+ if data:
+ unit.set_property(["DART_DATA", data])
+
+
+@df.with_fields(
+ TS_TEST_FIELDS_BASE
+ + (
+ df.Size.value2,
+ df.Tag.value2,
+ df.Requirements.value4,
+ df.ConfigPath.value,
+ df.TsTestDataDirs.value,
+ df.TsTestDataDirsRename.value,
+ df.TsResources.value,
)
- _add_test(unit, test_runner, test_files, deps, test_record)
+)
+def _add_playwright_ts_test(fields, unit, default_config, node_modules_filename):
+ if unit.enabled('TS_COVERAGE'):
+ unit.on_peerdir_ts_resource("nyc")
+ for_mod_path = unit.get("TS_TEST_FOR_PATH")
+ unit.onpeerdir([for_mod_path])
+ unit.on_setup_extract_node_modules_recipe([for_mod_path])
+ unit.on_setup_extract_output_tars_recipe([for_mod_path])
+
+ test_runner = 'playwright'
+
+ unit.set(["TS_TEST_NM", os.path.join("$(BUILD_ROOT)", for_mod_path, node_modules_filename)])
+
+ config_path = unit.get("TS_TEST_CONFIG_PATH")
+ if not config_path:
+ config_path = os.path.join(for_mod_path, default_config)
+ unit.set(["TS_TEST_CONFIG_PATH", config_path])
+
+ test_files = df.TestFiles.value6(unit, (), {})[df.TestFiles.KEY]
+ if not test_files:
+ ymake.report_configure_error("No tests found")
+ return
-def _setup_eslint(unit):
+ from lib.nots.package_manager import constants
+
+ def sort_uniq(text):
+ return sorted(set(text))
+
+ deps = df.CustomDependencies.value5(unit, (), {})[df.CustomDependencies.KEY].split()
+
+ if deps:
+ joined_deps = "\n".join(deps)
+ logger.info(f"{test_runner} deps: \n{joined_deps}")
+ unit.ondepends(deps)
+
+ flat_args = (test_runner, "TS_TEST_FOR_PATH")
+
+ dart_record = create_dart_record(fields, unit, flat_args, {})
+ dart_record[df.TestFiles.KEY] = test_files
+ dart_record[df.NodeModulesBundleFilename.KEY] = constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME
+
+ extra_deps = df.CustomDependencies.value3(unit, (), {})[df.CustomDependencies.KEY].split()
+ dart_record[df.CustomDependencies.KEY] = " ".join(sort_uniq(deps + extra_deps))
+ dart_record[df.TsTestForPath.KEY] = for_mod_path
+
+ data = ytest.dump_test(unit, dart_record)
+ if data:
+ unit.set_property(["DART_DATA", data])
+
+
+@df.with_fields(
+ TS_TEST_FIELDS_BASE
+ + (
+ df.Size.value2,
+ df.TestCwd.value3,
+ df.Tag.value2,
+ df.Requirements.value4,
+ df.EslintConfigPath.value,
+ )
+)
+def _setup_eslint(fields, unit):
if not _is_tests_enabled(unit):
return
if unit.get("_NO_LINT_VALUE") == "none":
return
- lint_files = ytest.get_values_list(unit, "_TS_LINT_SRCS_VALUE")
- if not lint_files:
+ test_files = df.TestFiles.value6(unit, (), {})[df.TestFiles.KEY]
+ if not test_files:
return
- mod_dir = unit.get("MODDIR")
-
unit.on_peerdir_ts_resource("eslint")
user_recipes = unit.get("TEST_RECIPES_VALUE")
unit.on_setup_install_node_modules_recipe()
- lint_files = _resolve_module_files(unit, mod_dir, lint_files)
- deps = _create_pm(unit).get_peers_from_package_json()
- test_record = {
- "ESLINT_CONFIG_PATH": _resolve_config_path(unit, "eslint", rel_to="MODDIR"),
- "LINT-FILE-PROCESSING-TIME": str(ESLINT_FILE_PROCESSING_TIME_DEFAULT),
- }
+ test_type = "eslint"
- _add_test(unit, "eslint", lint_files, deps, test_record, mod_dir)
+ from lib.nots.package_manager import constants
+
+ def sort_uniq(text):
+ return sorted(set(text))
+
+ deps = df.CustomDependencies.value5(unit, (), {})[df.CustomDependencies.KEY].split()
+
+ if deps:
+ joined_deps = "\n".join(deps)
+ logger.info(f"{test_type} deps: \n{joined_deps}")
+ unit.ondepends(deps)
+
+ flat_args = (test_type, "TS_TEST_FOR_PATH")
+
+ dart_record = create_dart_record(fields, unit, flat_args, {})
+ dart_record[df.TestFiles.KEY] = test_files
+ dart_record[df.NodeModulesBundleFilename.KEY] = constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME
+
+ extra_deps = df.CustomDependencies.value3(unit, (), {})[df.CustomDependencies.KEY].split()
+ dart_record[df.CustomDependencies.KEY] = " ".join(sort_uniq(deps + extra_deps))
+ dart_record[df.LintFileProcessingTime.KEY] = str(ESLINT_FILE_PROCESSING_TIME_DEFAULT)
+
+ data = ytest.dump_test(unit, dart_record)
+ if data:
+ unit.set_property(["DART_DATA", data])
unit.set(["TEST_RECIPES_VALUE", user_recipes])
-def _setup_tsc_typecheck(unit, tsconfig_paths: list[str]):
+@df.with_fields(
+ TS_TEST_FIELDS_BASE
+ + (
+ df.Size.value2,
+ df.TestCwd.value3,
+ df.Tag.value2,
+ df.Requirements.value4,
+ )
+)
+def _setup_tsc_typecheck(fields, unit):
if not _is_tests_enabled(unit):
return
if unit.get("_TS_TYPECHECK_VALUE") == "none":
return
- typecheck_files = ytest.get_values_list(unit, "TS_INPUT_FILES")
- if not typecheck_files:
+ # typecheck_files = get_values_list(unit, "TS_INPUT_FILES")
+ test_files = df.TestFiles.value6(unit, (), {})[df.TestFiles.KEY]
+ if not test_files:
return
+ tsconfig_paths = unit.get("TS_CONFIG_PATH").split()
tsconfig_path = tsconfig_paths[0]
if len(tsconfig_paths) > 1:
@@ -436,78 +600,47 @@ def _setup_tsc_typecheck(unit, tsconfig_paths: list[str]):
unit.on_setup_install_node_modules_recipe()
unit.on_setup_extract_output_tars_recipe([unit.get("MODDIR")])
- _add_test(
- unit,
- test_type="tsc_typecheck",
- test_files=[resolve_common_const(f) for f in typecheck_files],
- deps=_create_pm(unit).get_peers_from_package_json(),
- test_record={"TS_CONFIG_PATH": tsconfig_path},
- test_cwd=unit.get("MODDIR"),
- )
- unit.set(["TEST_RECIPES_VALUE", user_recipes])
-
-
-def _resolve_module_files(unit, mod_dir, file_paths):
- mod_dir_with_sep_len = len(mod_dir) + 1
- resolved_files = []
+ test_type = "tsc_typecheck"
- for path in file_paths:
- resolved = rootrel_arc_src(path, unit)
- if resolved.startswith(mod_dir):
- resolved = resolved[mod_dir_with_sep_len:]
- resolved_files.append(resolved)
-
- return resolved_files
-
-
-def _add_test(unit, test_type, test_files, deps=None, test_record=None, test_cwd=None):
from lib.nots.package_manager import constants
def sort_uniq(text):
return sorted(set(text))
- recipes_lines = ytest.format_recipes(unit.get("TEST_RECIPES_VALUE")).strip().splitlines()
- if recipes_lines:
- deps = deps or []
- deps.extend([os.path.dirname(r.strip().split(" ")[0]) for r in recipes_lines])
+ deps = df.CustomDependencies.value5(unit, (), {})[df.CustomDependencies.KEY].split()
if deps:
joined_deps = "\n".join(deps)
logger.info(f"{test_type} deps: \n{joined_deps}")
unit.ondepends(deps)
- test_dir = get_norm_unit_path(unit)
- full_test_record = {
- # Key to discover suite (see devtools/ya/test/explore/__init__.py#gen_suite)
- "SCRIPT-REL-PATH": test_type,
- # Test name as shown in PR check, should be unique inside one module
- "TEST-NAME": test_type.lower().replace(".new", ""),
- "TEST-TIMEOUT": unit.get("TEST_TIMEOUT") or "",
- "TEST-ENV": ytest.prepare_env(unit.get("TEST_ENV_VALUE")),
- "TESTED-PROJECT-NAME": os.path.splitext(unit.filename())[0],
- "TEST-RECIPES": ytest.prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- "SOURCE-FOLDER-PATH": test_dir,
- "BUILD-FOLDER-PATH": test_dir,
- "BINARY-PATH": os.path.join(test_dir, unit.filename()),
- "SPLIT-FACTOR": unit.get("TEST_SPLIT_FACTOR") or "",
- "FORK-MODE": unit.get("TEST_FORK_MODE") or "",
- "SIZE": unit.get("TEST_SIZE_NAME") or "",
- "TEST-DATA": ytest.serialize_list(ytest.get_values_list(unit, "TEST_DATA_VALUE")),
- "TEST-FILES": ytest.serialize_list(test_files),
- "TEST-CWD": test_cwd or "",
- "TAG": ytest.serialize_list(ytest.get_values_list(unit, "TEST_TAGS_VALUE")),
- "REQUIREMENTS": ytest.serialize_list(ytest.get_values_list(unit, "TEST_REQUIREMENTS_VALUE")),
- "NODEJS-ROOT-VAR-NAME": unit.get("NODEJS-ROOT-VAR-NAME"),
- "NODE-MODULES-BUNDLE-FILENAME": constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME,
- "CUSTOM-DEPENDENCIES": " ".join(sort_uniq((deps or []) + ytest.get_values_list(unit, "TEST_DEPENDS_VALUE"))),
- }
-
- if test_record:
- full_test_record.update(test_record)
-
- data = ytest.dump_test(unit, full_test_record)
+ flat_args = (test_type,)
+
+ dart_record = create_dart_record(fields, unit, flat_args, {})
+ dart_record[df.TestFiles.KEY] = test_files
+ dart_record[df.NodeModulesBundleFilename.KEY] = constants.NODE_MODULES_WORKSPACE_BUNDLE_FILENAME
+
+ extra_deps = df.CustomDependencies.value3(unit, (), {})[df.CustomDependencies.KEY].split()
+ dart_record[df.CustomDependencies.KEY] = " ".join(sort_uniq(deps + extra_deps))
+ dart_record[df.TsConfigPath.KEY] = tsconfig_path
+
+ data = ytest.dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
+ unit.set(["TEST_RECIPES_VALUE", user_recipes])
+
+
+def _resolve_module_files(unit, mod_dir, file_paths):
+ mod_dir_with_sep_len = len(mod_dir) + 1
+ resolved_files = []
+
+ for path in file_paths:
+ resolved = rootrel_arc_src(path, unit)
+ if resolved.startswith(mod_dir):
+ resolved = resolved[mod_dir_with_sep_len:]
+ resolved_files.append(resolved)
+
+ return resolved_files
def _set_resource_vars(unit, erm_json, tool, version, nodejs_major=None):
@@ -661,40 +794,12 @@ def on_ts_test_for_configure(unit, test_runner, default_config, node_modules_fil
if not _is_tests_enabled(unit):
return
- if unit.enabled('TS_COVERAGE'):
- unit.on_peerdir_ts_resource("nyc")
-
- for_mod_path = unit.get("TS_TEST_FOR_PATH")
- unit.onpeerdir([for_mod_path])
- unit.on_setup_extract_node_modules_recipe([for_mod_path])
- unit.on_setup_extract_output_tars_recipe([for_mod_path])
-
- root = "$B" if test_runner == "hermione" else "$(BUILD_ROOT)"
- unit.set(["TS_TEST_NM", os.path.join(root, for_mod_path, node_modules_filename)])
-
- config_path = unit.get("TS_TEST_CONFIG_PATH")
- if not config_path:
- config_path = os.path.join(for_mod_path, default_config)
- unit.set(["TS_TEST_CONFIG_PATH", config_path])
-
- test_record = _add_ts_resources_to_test_record(
- unit,
- {
- "TS-TEST-FOR-PATH": for_mod_path,
- "TS-TEST-DATA-DIRS": ytest.serialize_list(_get_ts_test_data_dirs(unit)),
- "TS-TEST-DATA-DIRS-RENAME": unit.get("_TS_TEST_DATA_DIRS_RENAME_VALUE"),
- },
- )
-
- test_files = ytest.get_values_list(unit, "_TS_TEST_SRCS_VALUE")
- test_files = _resolve_module_files(unit, unit.get("MODDIR"), test_files)
- if not test_files:
- ymake.report_configure_error("No tests found")
- return
-
- deps = _create_pm(unit).get_peers_from_package_json()
- add_ts_test = _get_test_runner_handlers()[test_runner]
- add_ts_test(unit, test_runner, test_files, deps, test_record)
+ if test_runner == 'jest':
+ _add_jest_ts_test(unit, default_config, node_modules_filename)
+ elif test_runner == 'hermione':
+ _add_hermione_ts_test(unit, default_config, node_modules_filename)
+ elif test_runner == 'playwright':
+ _add_playwright_ts_test(unit, default_config, node_modules_filename)
@_with_report_configure_error
diff --git a/build/plugins/ya.make b/build/plugins/ya.make
index 85c885c619..a9bbd0d9b9 100644
--- a/build/plugins/ya.make
+++ b/build/plugins/ya.make
@@ -7,6 +7,7 @@ STYLE_PYTHON()
PY_SRCS(
TOP_LEVEL
_common.py
+ _dart_fields.py
_requirements.py
_xsyn_includes.py
bundle.py
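
The list serialization helpers `serialize_list`/`deserialize_list` now live in `_dart_fields`, and ytest.py below imports them from there. Their contract, copied from the new module and shown as a quick round trip:

    def serialize_list(lst):
        lst = list(filter(None, lst))
        return '"' + ';'.join(lst) + '"' if lst else ''


    def deserialize_list(val):
        return list(filter(None, val.replace('"', "").split(";")))


    # Empty entries are dropped and the round trip is stable:
    assert serialize_list(['a', '', 'b']) == '"a;b"'
    assert deserialize_list('"a;b"') == ['a', 'b']
    assert serialize_list([]) == ''
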
diff --git a/build/plugins/ytest.py b/build/plugins/ytest.py
index 04ad1bd4bc..cc7ac9da88 100644
--- a/build/plugins/ytest.py
+++ b/build/plugins/ytest.py
@@ -1,31 +1,32 @@
from __future__ import print_function
+import base64
+import collections
+import copy
+import json
import os
import re
-import sys
import six
-import json
-import copy
-import base64
-import shlex
-import _common
-import lib.test_const as consts
-import _requirements as reqs
-
-from collections.abc import Buffer
+import subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
-import subprocess
-import collections
-import ymake
-CANON_DATA_DIR_NAME = 'canondata'
-CANON_OUTPUT_STORAGE = 'canondata_storage'
-CANON_RESULT_FILE_NAME = 'result.json'
+import _common
+import _dart_fields as df
+import _requirements as reqs
+import lib.test_const as consts
+import ymake
+from _dart_fields import (
+ serialize_list,
+ get_unit_list_variable,
+ deserialize_list,
+ prepare_env,
+ create_dart_record,
+)
BLOCK_SEPARATOR = '============================================================='
SPLIT_FACTOR_MAX_VALUE = 1000
@@ -37,6 +38,66 @@ PROJECT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_pro
KTLINT_CURRENT_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint/.editorconfig"
KTLINT_OLD_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint_old/.editorconfig"
+YTEST_FIELDS_BASE = (
+ df.AndroidApkTestActivity.value,
+ df.BinaryPath.value,
+ df.BuildFolderPath.value,
+ df.CustomDependencies.value,
+ df.GlobalLibraryPath.value,
+ df.ScriptRelPath.value,
+ df.SkipTest.value,
+ df.SourceFolderPath.value,
+ df.SplitFactor.value,
+ df.TestCwd.value,
+ df.TestedProjectFilename.value,
+ df.TestedProjectName.value,
+ df.TestEnv.value,
+ df.TestIosDeviceType.value,
+ df.TestIosRuntimeType.value,
+ df.TestRecipes.value,
+)
+
+YTEST_FIELDS_EXTRA = (
+ df.Blob.value,
+ df.ForkMode.value,
+ df.Size.value,
+ df.Tag.value,
+ df.TestTimeout.value,
+ df.YtSpec.value,
+)
+
+PY_EXEC_FIELDS_BASE = (
+ df.Blob.value,
+ df.BuildFolderPath.value2,
+ df.CanonizeSubPath.value,
+ df.CustomDependencies.value3,
+ df.ForkMode.value2,
+ df.ForkTestFiles.value,
+ df.PythonPaths.value,
+ df.Requirements.value4,
+ df.Size.value2,
+ df.SkipTest.value,
+ df.SourceFolderPath.value,
+ df.SplitFactor.value2,
+ df.Tag.value,
+ df.TestCwd.value2,
+ df.TestData.value5,
+ df.TestEnv.value,
+ df.TestFiles.value5,
+ df.TestPartition.value,
+ df.TestRecipes.value,
+ df.TestTimeout.value2,
+ df.UseArcadiaPython.value,
+)
+
+CHECK_FIELDS_BASE = (
+ df.CustomDependencies.value2,
+ df.Requirements.value3,
+ df.ScriptRelPath.value2,
+ df.TestEnv.value,
+ df.TestName.value3,
+ df.UseArcadiaPython.value,
+)
tidy_config_map = None
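Note: each entry in the tuples above is a callable taking (unit, flat_args, spec_args) and returning either a one-key dict or a falsy value; create_dart_record merges the truthy results in tuple order, so later fields win on key collisions. The numbered variants (value, value2, ...) are alternative extraction strategies for the same dart key, picked per test type. A toy reproduction of the mechanism (the field functions here are invented for illustration):

    from _dart_fields import create_dart_record

    def test_name(unit, flat_args, spec_args):
        return {'TEST-NAME': flat_args[0].lower()}

    def skip_test(unit, flat_args, spec_args):
        return None  # falsy result: the key is omitted entirely

    record = create_dart_record((test_name, skip_test), None, ('GOFMT',), {})
    assert record == {'TEST-NAME': 'gofmt'}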
@@ -45,25 +106,6 @@ def ontest_data(unit, *args):
ymake.report_configure_error("TEST_DATA is removed in favour of DATA")
-def format_recipes(data: str | None) -> str:
- if not data:
- return ""
-
- data = data.replace('"USE_RECIPE_DELIM"', "\n")
- data = data.replace("$TEST_RECIPES_VALUE", "")
- return data
-
-
-def prepare_recipes(data: str | None) -> Buffer:
- formatted = format_recipes(data)
- return base64.b64encode(six.ensure_binary(formatted))
-
-
-def prepare_env(data):
- data = data.replace("$TEST_ENV_VALUE", "")
- return serialize_list(shlex.split(data))
-
-
def is_yt_spec_contain_pool_info(filename): # XXX switch to yson in ymake + perf test for configure
pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
cypress_root_re = re.compile(r"""['"]*cypress_root['"]*\s*=""")
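Note: both patterns above match the key whether or not it is quoted, for example:

    import re

    pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
    assert pool_re.search('pool = "my_pool"')
    assert pool_re.search('"pool"="my_pool"')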
@@ -355,15 +397,6 @@ def dump_test(unit, kw):
return data
-def serialize_list(lst):
- lst = list(filter(None, lst))
- return '\"' + ';'.join(lst) + '\"' if lst else ''
-
-
-def deserialize_list(val):
- return list(filter(None, val.replace('"', "").split(";")))
-
-
def reference_group_var(varname: str, extensions: list[str] | None = None) -> str:
if extensions is None:
return f'"${{join=\\;:{varname}}}"'
@@ -379,25 +412,6 @@ def count_entries(x):
return x.count(";") + 1
-def get_values_list(unit, key):
- res = map(str.strip, (unit.get(key) or '').replace('$' + key, '').strip().split())
- return [r for r in res if r and r not in ['""', "''"]]
-
-
-def get_norm_paths(unit, key):
- # return paths without trailing (back)slash
- return [x.rstrip('\\/').replace('${ARCADIA_ROOT}/', '') for x in get_values_list(unit, key)]
-
-
-def get_unit_list_variable(unit, name):
- items = unit.get(name)
- if items:
- items = items.split(' ')
- assert items[0] == "${}".format(name), (items, name)
- return items[1:]
- return []
-
-
def implies(a, b):
return bool((not a) or b)
@@ -461,26 +475,16 @@ def get_project_tidy_config(unit):
return get_default_tidy_config(unit)
-def java_srcdirs_to_data(unit, var):
- extra_data = []
- for srcdir in (unit.get(var) or '').replace('$' + var, '').split():
- if srcdir == '.':
- srcdir = unit.get('MODDIR')
- if srcdir.startswith('${ARCADIA_ROOT}/') or srcdir.startswith('$ARCADIA_ROOT/'):
- srcdir = srcdir.replace('${ARCADIA_ROOT}/', '$S/')
- srcdir = srcdir.replace('$ARCADIA_ROOT/', '$S/')
- if srcdir.startswith('${CURDIR}') or srcdir.startswith('$CURDIR'):
- srcdir = srcdir.replace('${CURDIR}', os.path.join('$S', unit.get('MODDIR')))
- srcdir = srcdir.replace('$CURDIR', os.path.join('$S', unit.get('MODDIR')))
- srcdir = unit.resolve_arc_path(srcdir)
- if not srcdir.startswith('$'):
- srcdir = os.path.join('$S', unit.get('MODDIR'), srcdir)
- if srcdir.startswith('$S'):
- extra_data.append(srcdir.replace('$S', 'arcadia'))
- return serialize_list(extra_data)
-
-
-def check_data(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.SbrUidExt.value,
+ df.TestFiles.value,
+ )
+)
+def check_data(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -496,49 +500,26 @@ def check_data(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
- test_files = flat_args[1:]
-
- uid_ext = unit.get("SBR_UID_EXT").split(" ", 1)[-1] # strip variable name
-
- data_re = re.compile(r"sbr:/?/?(\d+)=?.*")
- data = flat_args[1:]
- resources = []
- for f in data:
- matched = re.match(data_re, f)
- if matched:
- resources.append(matched.group(1))
- if resources:
- test_files = resources
- else:
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
+ if not dart_record[df.TestFiles.KEY]:
return
- serialized_test_files = serialize_list(test_files)
-
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'SCRIPT-REL-PATH': 'check.data',
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'SBR-UID-EXT': uid_ext,
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- }
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def check_resource(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.SbrUidExt.value,
+ df.TestFiles.value2,
+ )
+)
+def check_resource(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -554,37 +535,28 @@ def check_resource(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
-
- test_files = flat_args[1:]
-
- uid_ext = unit.get("SBR_UID_EXT").split(" ", 1)[-1] # strip variable name
- serialized_test_files = serialize_list(test_files)
-
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'SCRIPT-REL-PATH': 'check.resource',
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'SBR-UID-EXT': uid_ext,
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def ktlint(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.TestData.value3,
+ df.TestFiles.value2,
+ df.ModuleLang.value,
+ df.KtlintBinary.value,
+ df.UseKtlintOld.value,
+ df.KtlintBaselineFile.value,
+ )
+)
+def ktlint(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -600,53 +572,29 @@ def ktlint(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
-
- extra_test_dart_data = {}
- test_files = flat_args[1:]
- if unit.get('_USE_KTLINT_OLD') == 'yes':
- extra_test_data = serialize_list([KTLINT_OLD_EDITOR_CONFIG])
- extra_test_dart_data['KTLINT_BINARY'] = '$(KTLINT_OLD)/run.bat'
- extra_test_dart_data['USE_KTLINT_OLD'] = 'yes'
- else:
- data_list = [KTLINT_CURRENT_EDITOR_CONFIG]
- baseline_path_relative = unit.get('_KTLINT_BASELINE_FILE')
- if baseline_path_relative:
- baseline_path = unit.resolve_arc_path(baseline_path_relative).replace('$S', 'arcadia')
- data_list += [baseline_path]
- extra_test_dart_data['KTLINT_BASELINE_FILE'] = baseline_path_relative
- extra_test_data = serialize_list(data_list)
- extra_test_dart_data['KTLINT_BINARY'] = '$(KTLINT)/run.bat'
-
- serialized_test_files = serialize_list(test_files)
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
+ dart_record[df.TestTimeout.KEY] = '120'
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'TEST-TIMEOUT': '120',
- 'SCRIPT-REL-PATH': 'ktlint',
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-DATA': extra_test_data,
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
- test_record.update(extra_test_dart_data)
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def java_style(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.TestData.value4,
+ df.ForkMode.value2,
+ df.TestFiles.value3,
+ df.JdkLatestVersion.value,
+ df.JdkResource.value,
+ df.ModuleLang.value,
+ )
+)
+def java_style(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -662,57 +610,32 @@ def java_style(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
-
- ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
- test_files = flat_args[1:]
-
if len(flat_args) < 2:
raise Exception("Not enough arguments for JAVA_STYLE check")
- check_level = flat_args[1]
- allowed_levels = {
- 'base': '/yandex_checks.xml',
- 'strict': '/yandex_checks_strict.xml',
- 'extended': '/yandex_checks_extended.xml',
- 'library': '/yandex_checks_library.xml',
- }
- if check_level not in allowed_levels:
- raise Exception("'{}' is not allowed in LINT(), use one of {}".format(check_level, allowed_levels.keys()))
- test_files[0] = allowed_levels[check_level] # replace check_level with path to config file
# jstyle should use the latest jdk
unit.onpeerdir([unit.get('JDK_LATEST_PEERDIR')])
- serialized_test_files = serialize_list(test_files)
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
+ dart_record[df.TestTimeout.KEY] = '240'
+ dart_record[df.ScriptRelPath.KEY] = 'java.style'
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'TEST-TIMEOUT': '240',
- 'SCRIPT-REL-PATH': "java.style",
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-DATA': java_srcdirs_to_data(unit, 'ALL_SRCDIRS') if ymake_java_test else '',
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'FORK-MODE': unit.get('TEST_FORK_MODE'),
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- 'JDK_LATEST_VERSION': unit.get('JDK_LATEST_VERSION'),
- 'JDK_RESOURCE': 'JDK' + (unit.get('JDK_VERSION') or unit.get('JDK_REAL_VERSION') or '_DEFAULT'),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def gofmt(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value3,
+ df.SourceFolderPath.value2,
+ df.ForkMode.value2,
+ df.TestFiles.value2,
+ df.ModuleLang.value,
+ )
+)
+def gofmt(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -728,38 +651,25 @@ def gofmt(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
-
- test_files = flat_args[1:]
- if test_files:
- test_dir = os.path.dirname(test_files[0]).lstrip("$S/")
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- serialized_test_files = serialize_list(test_files)
-
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'SCRIPT-REL-PATH': 'gofmt',
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def govet(unit, *args):
+@df.with_fields(
+ CHECK_FIELDS_BASE
+ + (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.ForkMode.value2,
+ df.TestFiles.value2,
+ df.ModuleLang.value,
+ )
+)
+def govet(fields, unit, *args):
flat_args, spec_args = _common.sort_by_keywords(
{
"DEPENDS": -1,
@@ -775,28 +685,10 @@ def govet(unit, *args):
},
args,
)
- check_type = flat_args[0]
-
- test_dir = _common.get_norm_unit_path(unit)
- test_files = flat_args[1:]
- serialized_test_files = serialize_list(test_files)
- test_record = {
- 'TEST-NAME': check_type.lower(),
- 'SCRIPT-REL-PATH': 'govet',
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'REQUIREMENTS': " ".join(spec_args.get('REQUIREMENTS', [])),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': serialized_test_files,
- 'TEST-FILES': serialized_test_files,
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
@@ -843,144 +735,106 @@ def on_register_no_check_imports(unit):
unit.onresource(['-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(s), s)])
-def onadd_check_py_imports(unit, *args):
+@df.with_fields(
+ (
+ df.TestedProjectName.value2,
+ df.SourceFolderPath.value,
+ df.TestEnv.value,
+ df.UseArcadiaPython.value,
+ df.TestFiles.value4,
+ df.ModuleLang.value,
+ df.NoCheck.value,
+ )
+)
+def onadd_check_py_imports(fields, unit, *args):
if unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
return
if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
return
unit.onpeerdir(['library/python/testing/import_test'])
- check_type = "py.imports"
- test_dir = _common.get_norm_unit_path(unit)
- test_files = serialize_list([_common.get_norm_unit_path(unit, unit.filename())])
- test_record = {
- 'TEST-NAME': "pyimports",
- 'SCRIPT-REL-PATH': check_type,
- 'TESTED-PROJECT-NAME': os.path.basename(test_dir),
- 'SOURCE-FOLDER-PATH': test_dir,
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- 'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON'),
- # TODO remove FILES, see DEVTOOLS-7052
- 'FILES': test_files,
- 'TEST-FILES': test_files,
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
- if unit.get('NO_CHECK_IMPORTS_FOR_VALUE') != "None":
- test_record["NO-CHECK"] = serialize_list(get_values_list(unit, 'NO_CHECK_IMPORTS_FOR_VALUE') or ["*"])
+ dart_record = create_dart_record(fields, unit, (), {})
+ dart_record[df.TestName.KEY] = 'pyimports'
+ dart_record[df.ScriptRelPath.KEY] = 'py.imports'
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def onadd_pytest_bin(unit, *args):
+@df.with_fields(
+ PY_EXEC_FIELDS_BASE
+ + (
+ df.TestName.value4,
+ df.ScriptRelPath.value3,
+ df.TestedProjectName.value4,
+ df.ModuleLang.value,
+ df.BinaryPath.value2,
+ df.TestRunnerBin.value,
+ )
+)
+def onadd_pytest_bin(fields, unit, *args):
if unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
return
- flat, kws = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
- if flat:
- ymake.report_configure_error('Unknown arguments found while processing add_pytest_bin macro: {!r}'.format(flat))
-
- runner_bin = kws.get('RUNNER_BIN', [None])[0]
- test_type = 'py3test.bin' if (unit.get("PYTHON3") == 'yes') else "pytest.bin"
+ flat_args, spec_args = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
+ if flat_args:
+ ymake.report_configure_error(
+ 'Unknown arguments found while processing add_pytest_bin macro: {!r}'.format(flat_args)
+ )
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
- timeout = list(filter(None, [unit.get(["TEST_TIMEOUT"])]))
- if timeout:
- timeout = timeout[0]
- else:
- timeout = '0'
- yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
- unit.ondata_files(yt_spec)
-
- unit_path = unit.path()
- test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
- tags = _get_test_tags(unit)
- requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
- test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- data, _ = get_canonical_test_resources(unit)
- test_data += data
- python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
- binary_path = os.path.join(unit_path, unit.filename())
-
- script_rel_path = test_type
-
- unit_path = unit.path()
- fork_test_files = unit.get('FORK_TEST_FILES_MODE')
- fork_mode = unit.get('TEST_FORK_MODE').split() or ''
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
- use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
- test_cwd = unit.get('TEST_CWD_VALUE') or ''
- if test_cwd:
- test_cwd = test_cwd.replace("$TEST_CWD_VALUE", "").replace('"MACRO_CALLS_DELIM"', "").strip()
- test_name = os.path.basename(binary_path)
- test_record = {
- 'TEST-NAME': os.path.splitext(test_name)[0],
- 'TEST-TIMEOUT': timeout,
- 'SCRIPT-REL-PATH': script_rel_path,
- 'TESTED-PROJECT-NAME': test_name,
- 'SOURCE-FOLDER-PATH': _common.get_norm_unit_path(unit),
- 'CUSTOM-DEPENDENCIES': " ".join(custom_deps),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'SPLIT-FACTOR': unit.get('TEST_SPLIT_FACTOR'),
- 'TEST_PARTITION': unit.get('TEST_PARTITION'),
- 'FORK-MODE': fork_mode,
- 'FORK-TEST-FILES': fork_test_files,
- 'TEST-FILES': serialize_list(test_files),
- 'SIZE': unit.get('TEST_SIZE_NAME'),
- 'TAG': serialize_list(sorted(tags)),
- 'REQUIREMENTS': serialize_list(requirements),
- 'USE_ARCADIA_PYTHON': use_arcadia_python,
- 'OLD_PYTEST': 'no',
- 'PYTHON-PATHS': serialize_list(python_paths),
- 'TEST-CWD': test_cwd,
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'BUILD-FOLDER-PATH': _common.strip_roots(unit_path),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'CANONIZE_SUB_PATH': unit.get('CANONIZE_SUB_PATH'),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
- if binary_path:
- test_record['BINARY-PATH'] = _common.strip_roots(binary_path)
- if runner_bin:
- test_record['TEST-RUNNER-BIN'] = runner_bin
+
+ yt_spec = df.YtSpec.value2(unit, flat_args, spec_args)
+ if yt_spec and yt_spec[df.YtSpec.KEY]:
+ unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
+
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
+ dart_record[df.OldPytest.KEY] = 'no'
if yt_spec:
- test_record['YT-SPEC'] = serialize_list(yt_spec)
+ dart_record |= yt_spec
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def extract_java_system_properties(unit, args):
- if len(args) % 2:
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())
-
- props = []
- for x, y in zip(args[::2], args[1::2]):
- if x == 'FILE':
- if y.startswith('${BINDIR}') or y.startswith('${ARCADIA_BUILD_ROOT}') or y.startswith('/'):
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), y)
-
- y = _common.rootrel_arc_src(y, unit)
- if not os.path.exists(unit.resolve('$S/' + y)):
- return [], 'Wrong use of SYSTEM_PROPERTIES in {}: can\'t resolve {}'.format(unit.path(), y)
-
- y = '${ARCADIA_ROOT}/' + y
- props.append({'type': 'file', 'path': y})
- else:
- props.append({'type': 'inline', 'key': x, 'value': y})
-
- return props, None
-
-
-def onjava_test(unit, *args):
+@df.with_fields(
+ (
+ df.SourceFolderPath.value,
+ df.TestName.value5,
+ df.ScriptRelPath.value4,
+ df.TestTimeout.value3,
+ df.TestedProjectName.value5,
+ df.TestEnv.value,
+ df.TestData.value6,
+ df.ForkMode.value2,
+ df.SplitFactor.value2,
+ df.CustomDependencies.value3,
+ df.Tag.value,
+ df.Size.value2,
+ df.Requirements.value2,
+ df.TestRecipes.value,
+ df.ModuleType.value,
+ df.UnittestDir.value,
+ df.JvmArgs.value,
+ # TODO optimize, SystemProperties is used in TestData
+ df.SystemProperties.value,
+ df.TestCwd.value,
+ df.SkipTest.value,
+ df.JavaClasspathCmdType.value,
+ df.JdkResource.value,
+ df.JdkForTests.value,
+ df.ModuleLang.value,
+ df.TestClasspath.value,
+ df.TestClasspathOrigins.value,
+ df.TestClasspathDeps.value,
+ df.TestJar.value,
+ )
+)
+def onjava_test(fields, unit, *args):
if unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
return
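Note: in the pytest hunk above, YtSpec is evaluated outside the field tuple because its value is needed twice: deserialized for ondata_files and merged into the record. The merge itself is a plain dict union; a sketch, where the YT-SPEC value shape is assumed from the removed serialize_list call:

    dart_record = {'OLD_PYTEST': 'no'}
    yt_spec = {'YT-SPEC': '"spec.yson"'}  # assumed shape of df.YtSpec.value2 output
    if yt_spec:
        dart_record |= yt_spec  # in-place dict union, Python 3.9+
    assert dart_record == {'OLD_PYTEST': 'no', 'YT-SPEC': '"spec.yson"'}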
@@ -992,92 +846,36 @@ def onjava_test(unit, *args):
ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
return
- java_cp_arg_type = unit.get('JAVA_CLASSPATH_CMD_TYPE_VALUE') or 'MANIFEST'
- if java_cp_arg_type not in ('MANIFEST', 'COMMAND_FILE', 'LIST'):
- ymake.report_configure_error(
- '{}: TEST_JAVA_CLASSPATH_CMD_TYPE({}) are invalid. Choose argument from MANIFEST, COMMAND_FILE or LIST)'.format(
- unit.path(), java_cp_arg_type
- )
- )
- return
-
- unit_path = unit.path()
- path = _common.strip_roots(unit_path)
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- yt_spec_values = get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')
- unit.ondata_files(yt_spec_values)
-
- test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- test_data.append('arcadia/build/scripts/run_junit.py')
- test_data.append('arcadia/build/scripts/unpacking_jtest_runner.py')
- data, data_files = get_canonical_test_resources(unit)
- test_data += data
+ yt_spec = df.YtSpec.value3(unit, (), {})
+ unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
- props, error_mgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
- if error_mgs:
- ymake.report_configure_error(error_mgs)
+ try:
+ dart_record = create_dart_record(fields, unit, (), {})
+ except df.DartValueError:
return
- for prop in props:
- if prop['type'] == 'file':
- test_data.append(prop['path'].replace('${ARCADIA_ROOT}', 'arcadia'))
+ dart_record |= yt_spec
- props = base64.b64encode(six.ensure_binary(json.dumps(props)))
-
- if unit.get('MODULE_TYPE') == 'JUNIT5':
- script_rel_path = 'junit5.test'
- else:
- script_rel_path = 'junit.test'
-
- ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
- test_record = {
- 'SOURCE-FOLDER-PATH': path,
- 'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path)]),
- 'SCRIPT-REL-PATH': script_rel_path,
- 'TEST-TIMEOUT': unit.get('TEST_TIMEOUT'),
- 'TESTED-PROJECT-NAME': path,
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
- 'FORK-MODE': unit.get('TEST_FORK_MODE'),
- 'SPLIT-FACTOR': unit.get('TEST_SPLIT_FACTOR'),
- 'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TAG': serialize_list(sorted(_get_test_tags(unit))),
- 'SIZE': unit.get('TEST_SIZE_NAME'),
- 'REQUIREMENTS': serialize_list(get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- # JTEST/JTEST_FOR only
- 'MODULE_TYPE': unit.get('MODULE_TYPE'),
- 'UNITTEST_DIR': unit.get('UNITTEST_DIR'),
- 'JVM_ARGS': serialize_list(get_values_list(unit, 'JVM_ARGS_VALUE')),
- 'SYSTEM_PROPERTIES': props,
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'), # TODO: validate test_cwd value
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'JAVA_CLASSPATH_CMD_TYPE': java_cp_arg_type,
- 'JDK_RESOURCE': 'JDK' + (unit.get('JDK_VERSION') or unit.get('JDK_REAL_VERSION') or '_DEFAULT'),
- 'JDK_FOR_TESTS': 'JDK' + (unit.get('JDK_VERSION') or unit.get('JDK_REAL_VERSION') or '_DEFAULT') + '_FOR_TESTS',
- 'YT-SPEC': serialize_list(yt_spec_values),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
- test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
- if test_classpath_origins:
- test_record['TEST_CLASSPATH_ORIGINS'] = test_classpath_origins
- test_record['TEST_CLASSPATH'] = '${TEST_CLASSPATH_MANAGED}'
- elif ymake_java_test:
- test_record['TEST_CLASSPATH'] = '${DART_CLASSPATH}'
- test_record['TEST_CLASSPATH_DEPS'] = '${DART_CLASSPATH_DEPS}'
- if unit.get('UNITTEST_DIR'):
- test_record['TEST_JAR'] = '${UNITTEST_MOD}'
- else:
- test_record['TEST_JAR'] = '{}/{}.jar'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(['DART_DATA', data])
-def onjava_test_deps(unit, *args):
+@df.with_fields(
+ (
+ df.SourceFolderPath.value,
+ df.TestName.value6,
+ df.TestedProjectName.value5,
+ df.CustomDependencies.value3,
+ df.IgnoreClasspathClash.value,
+ df.ModuleType.value,
+ df.ModuleLang.value,
+ df.Classpath.value,
+ )
+)
+def onjava_test_deps(fields, unit, *args):
if unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
return
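Note: onjava_test is the first handler in this file to guard record creation with df.DartValueError: validation that used to call report_configure_error inline (the classpath-cmd-type check in the removed lines) now lives inside the field itself, which reports and raises on bad input. A sketch of such a field as it would sit inside _dart_fields.py, reconstructed from the removed check (the real implementation may differ):

    class JavaClasspathCmdType:
        KEY = 'JAVA_CLASSPATH_CMD_TYPE'

        @classmethod
        def value(cls, unit, flat_args, spec_args):
            cp_type = unit.get('JAVA_CLASSPATH_CMD_TYPE_VALUE') or 'MANIFEST'
            if cp_type not in ('MANIFEST', 'COMMAND_FILE', 'LIST'):
                ymake.report_configure_error(
                    '{}: TEST_JAVA_CLASSPATH_CMD_TYPE({}) is invalid. '
                    'Choose one of MANIFEST, COMMAND_FILE or LIST'.format(unit.path(), cp_type)
                )
                raise DartValueError()  # caught by the handler's try/except
            return {cls.KEY: cp_type}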
@@ -1086,49 +884,15 @@ def onjava_test_deps(unit, *args):
assert len(args) == 1
mode = args[0]
- path = _common.get_norm_unit_path(unit)
- ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
-
- test_record = {
- 'SOURCE-FOLDER-PATH': path,
- 'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path), 'dependencies']).strip(
- '-'
- ),
- 'SCRIPT-REL-PATH': 'java.dependency.test',
- 'TESTED-PROJECT-NAME': path,
- 'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'IGNORE_CLASSPATH_CLASH': ' '.join(get_values_list(unit, 'JAVA_IGNORE_CLASSPATH_CLASH_VALUE')),
- # JTEST/JTEST_FOR only
- 'MODULE_TYPE': unit.get('MODULE_TYPE'),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, (args[0],), {})
+ dart_record[df.ScriptRelPath.KEY] = 'java.dependency.test'
if mode == 'strict':
- test_record['STRICT_CLASSPATH_CLASH'] = 'yes'
-
- if ymake_java_test:
- test_record['CLASSPATH'] = '$B/{}/{}.jar ${{DART_CLASSPATH}}'.format(
- unit.get('MODDIR'), unit.get('REALPRJNAME')
- )
+ dart_record[df.StrictClasspathClash.KEY] = 'yes'
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
unit.set_property(['DART_DATA', data])
-def _get_test_tags(unit, spec_args=None):
- if spec_args is None:
- spec_args = {}
- tags = spec_args.get('TAG', []) + get_values_list(unit, 'TEST_TAGS_VALUE')
- tags = set(tags)
- if unit.get('EXPORT_SEM') == 'yes':
- filter_only_tags = sorted(t for t in tags if ':' not in t)
- unit.set(['FILTER_ONLY_TEST_TAGS', ' '.join(filter_only_tags)])
- # DEVTOOLS-7571
- if unit.get('SKIP_TEST_VALUE') and consts.YaTestTags.Fat in tags:
- tags.add(consts.YaTestTags.NotAutocheck)
-
- return tags
-
-
def onsetup_pytest_bin(unit, *args):
use_arcadia_python = unit.get('USE_ARCADIA_PYTHON') == "yes"
if use_arcadia_python:
@@ -1142,7 +906,15 @@ def onrun(unit, *args):
unit.set(["EXECTEST_COMMAND_VALUE", exectest_cmd])
-def onsetup_exectest(unit, *args):
+@df.with_fields(
+ PY_EXEC_FIELDS_BASE
+ + (
+ df.TestName.value7,
+ df.TestedProjectName.value6,
+ df.BinaryPath.value3,
+ )
+)
+def onsetup_exectest(fields, unit, *args):
if unit.get("TIDY") == "yes":
# graph changed for clang_tidy tests
return
@@ -1156,66 +928,18 @@ def onsetup_exectest(unit, *args):
unit.set(["TEST_BLOB_DATA", base64.b64encode(six.ensure_binary(command))])
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
- timeout = list(filter(None, [unit.get(["TEST_TIMEOUT"])]))
- if timeout:
- timeout = timeout[0]
- else:
- timeout = '0'
- split_factor = unit.get('TEST_SPLIT_FACTOR')
- test_cwd = unit.get('TEST_CWD_VALUE')
- yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
- unit.ondata_files(yt_spec)
-
- test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
- tags = _get_test_tags(unit)
- requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
- test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
- data, _ = get_canonical_test_resources(unit)
- test_data += data
- python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
-
- unit_path = unit.path()
- fork_test_files = unit.get('FORK_TEST_FILES_MODE')
- fork_mode = unit.get('TEST_FORK_MODE').split() or ''
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
- use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
- if test_cwd:
- test_cwd = test_cwd.replace("$TEST_CWD_VALUE", "").replace('"MACRO_CALLS_DELIM"', "").strip()
- test_name = os.path.basename(os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
- test_record = {
- 'TEST-NAME': os.path.splitext(test_name)[0],
- 'TEST-TIMEOUT': timeout,
- 'SCRIPT-REL-PATH': "exectest",
- 'TESTED-PROJECT-NAME': test_name,
- 'SOURCE-FOLDER-PATH': _common.get_norm_unit_path(unit),
- 'CUSTOM-DEPENDENCIES': " ".join(custom_deps),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'SPLIT-FACTOR': split_factor,
- 'TEST_PARTITION': unit.get('TEST_PARTITION'),
- 'FORK-MODE': fork_mode,
- 'FORK-TEST-FILES': fork_test_files,
- 'TEST-FILES': serialize_list(test_files),
- 'SIZE': unit.get('TEST_SIZE_NAME'),
- 'TAG': serialize_list(sorted(tags)),
- 'REQUIREMENTS': serialize_list(requirements),
- 'USE_ARCADIA_PYTHON': use_arcadia_python,
- 'OLD_PYTEST': 'no',
- 'PYTHON-PATHS': serialize_list(python_paths),
- 'TEST-CWD': test_cwd,
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'BUILD-FOLDER-PATH': _common.strip_roots(unit_path),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'CANONIZE_SUB_PATH': unit.get('CANONIZE_SUB_PATH'),
- }
- test_record['BINARY-PATH'] = _common.strip_roots(os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
+
+ yt_spec = df.YtSpec.value2(unit, (), {})
+ if yt_spec and yt_spec[df.YtSpec.KEY]:
+ unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
+
+ dart_record = create_dart_record(fields, unit, (), {})
+ dart_record[df.ScriptRelPath.KEY] = 'exectest'
+ dart_record[df.OldPytest.KEY] = 'no'
if yt_spec:
- test_record['YT-SPEC'] = serialize_list(yt_spec)
+ dart_record |= yt_spec
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
@@ -1225,81 +949,6 @@ def onsetup_run_python(unit):
unit.ondepends('contrib/tools/python')
-def get_canonical_test_resources(unit):
- unit_path = unit.path()
- if unit.get("CUSTOM_CANONDATA_PATH"):
- path_to_canondata = unit_path.replace("$S", unit.get("CUSTOM_CANONDATA_PATH"))
- else:
- path_to_canondata = unit.resolve(unit_path)
- canon_data_dir = os.path.join(path_to_canondata, CANON_DATA_DIR_NAME, unit.get('CANONIZE_SUB_PATH') or '')
- try:
- _, dirs, files = next(os.walk(canon_data_dir))
- except StopIteration:
- # path doesn't exist
- return [], []
- if CANON_RESULT_FILE_NAME in files:
- return _get_canonical_data_resources_v2(os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME), unit_path)
- return [], []
-
-
-def _load_canonical_file(filename, unit_path):
- try:
- with open(filename, 'rb') as results_file:
- return json.load(results_file)
- except Exception as e:
- print("malformed canonical data in {}: {} ({})".format(unit_path, e, filename), file=sys.stderr)
- return {}
-
-
-def _get_resource_from_uri(uri):
- m = consts.CANON_MDS_RESOURCE_REGEX.match(uri)
- if m:
- key = m.group(1)
- return "{}:{}".format(consts.MDS_SCHEME, key)
-
- m = consts.CANON_BACKEND_RESOURCE_REGEX.match(uri)
- if m:
- key = m.group(1)
- return "{}:{}".format(consts.MDS_SCHEME, key)
-
- m = consts.CANON_SBR_RESOURCE_REGEX.match(uri)
- if m:
- # There might be conflict between resources, because all resources in sandbox have 'resource.tar.gz' name
- # That's why we use notation with '=' to specify specific path for resource
- uri = m.group(1)
- res_id = m.group(2)
- return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
-
-
-def _get_external_resources_from_canon_data(data):
- # Method should work with both canonization versions:
- # result.json: {'uri':X 'checksum':Y}
- # result.json: {'testname': {'uri':X 'checksum':Y}}
- # result.json: {'testname': [{'uri':X 'checksum':Y}]}
- # Also there is a bug - if user returns {'uri': 1} from test - machinery will fail
- # That's why we check 'uri' and 'checksum' fields presence
- # (it's still a bug - user can return {'uri':X, 'checksum': Y}, we need to unify canonization format)
- res = set()
-
- if isinstance(data, dict):
- if 'uri' in data and 'checksum' in data:
- resource = _get_resource_from_uri(data['uri'])
- if resource:
- res.add(resource)
- else:
- for k, v in six.iteritems(data):
- res.update(_get_external_resources_from_canon_data(v))
- elif isinstance(data, list):
- for e in data:
- res.update(_get_external_resources_from_canon_data(e))
-
- return res
-
-
-def _get_canonical_data_resources_v2(filename, unit_path):
- return (_get_external_resources_from_canon_data(_load_canonical_file(filename, unit_path)), [filename])
-
-
def on_add_linter_check(unit, *args):
if unit.get("TIDY") == "yes":
return
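Note: the canonical-data helpers deleted above are relocated rather than dropped; the CANON_* constants they depend on now live in _dart_fields.py. For an sbr URI the removed mapping produces a '<uri>=<storage path>' pair, roughly as below (the stand-in regex only illustrates the assumed capture groups; the real patterns live in lib.test_const):

    import re

    CANON_OUTPUT_STORAGE = 'canondata_storage'
    sbr_re = re.compile(r"(sbr:/?/?(\d+))")  # illustrative stand-in
    m = sbr_re.match("sbr://1234")
    uri, res_id = m.group(1), m.group(2)
    assert "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id])) == \
        "sbr://1234=canondata_storage/1234"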
@@ -1389,7 +1038,15 @@ def on_add_linter_check(unit, *args):
unit.set_property(["DART_DATA", data])
-def clang_tidy(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + (
+ df.TestName.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ )
+)
+def clang_tidy(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1411,38 +1068,25 @@ def clang_tidy(unit, *args):
unit.set(["DEFAULT_TIDY_CONFIG", default_config_path])
unit.set(["PROJECT_TIDY_CONFIG", project_config_path])
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def unittest_py(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ )
+)
+def unittest_py(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1457,65 +1101,25 @@ def unittest_py(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME') or ''
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def gunittest(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ )
+)
+def gunittest(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1530,65 +1134,26 @@ def gunittest(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME') or ''
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def g_benchmark(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ df.BenchmarkOpts.value,
+ )
+)
+def g_benchmark(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1603,68 +1168,25 @@ def g_benchmark(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME') or ''
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
-
- benchmark_opts = get_unit_list_variable(unit, 'BENCHMARK_OPTS_VALUE')
- test_record['BENCHMARK-OPTS'] = serialize_list(benchmark_opts)
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def go_test(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value2,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ )
+)
+def go_test(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1678,71 +1200,26 @@ def go_test(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
-
unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- data, _ = get_canonical_test_resources(unit)
- test_data += data
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def boost_test(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ )
+)
+def boost_test(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1756,67 +1233,28 @@ def boost_test(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
-
unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def fuzz_test(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value2,
+ df.FuzzDicts.value,
+ df.FuzzOpts.value,
+ df.Fuzzing.value,
+ )
+)
+def fuzz_test(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1830,80 +1268,29 @@ def fuzz_test(unit, *args):
if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
unit.ondata_files(_common.get_norm_unit_path(unit))
-
unit.ondata_files("fuzzing/{}/corpus.json".format(_common.get_norm_unit_path(unit)))
-
unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'FUZZ-DICTS': serialize_list(
- spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE')
- ),
- 'FUZZ-OPTS': serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE')),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- }
-
- if unit.get('FUZZING') == 'yes':
- test_record['FUZZING'] = '1'
- # use all cores if fuzzing requested
- test_record['REQUIREMENTS'] = serialize_list(
- filter(None, deserialize_list(test_record['REQUIREMENTS']) + ["cpu:all", "ram:all"])
- )
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
-def y_benchmark(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.ModuleLang.value,
+ df.BenchmarkOpts.value,
+ )
+)
+def y_benchmark(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1917,68 +1304,24 @@ def y_benchmark(unit, *args):
unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
-
- benchmark_opts = get_unit_list_variable(unit, 'BENCHMARK_OPTS_VALUE')
- test_record['BENCHMARK-OPTS'] = serialize_list(benchmark_opts)
-
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
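The only y_benchmark-specific field in the new tuple is df.BenchmarkOpts. A hedged sketch of how it could reproduce the inline logic deleted above; the method body is an assumption, the shipped implementation is in _dart_fields.py:

# Assumption: df.BenchmarkOpts mirrors the deleted inline code, which read
# BENCHMARK_OPTS_VALUE from the unit and serialized it into BENCHMARK-OPTS.
class BenchmarkOpts:
    KEY = 'BENCHMARK-OPTS'

    @classmethod
    def value(cls, unit, flat_args, spec_args):
        opts = get_unit_list_variable(unit, 'BENCHMARK_OPTS_VALUE')
        return {cls.KEY: serialize_list(opts)}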
-def coverage_extractor(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ )
+)
+def coverage_extractor(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -1992,64 +1335,26 @@ def coverage_extractor(unit, *args):
unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
-
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- }
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
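coverage_extractor adds no plugin-specific fields beyond the shared base: everything its deleted block computed by hand, fork mode included, now comes from YTEST_FIELDS_BASE and YTEST_FIELDS_EXTRA. As a worked example, the fork-mode computation that all three deleted blocks repeated verbatim translates mechanically into a field; the class and method names are assumptions, but the body is exactly the deleted logic:

# Assumption: a ForkMode field reproducing the block deleted from each plugin.
class ForkMode:
    KEY = 'FORK-MODE'

    @classmethod
    def value(cls, unit, flat_args, spec_args):
        fork_mode = []
        if 'FORK_SUBTESTS' in spec_args:
            fork_mode.append('subtests')
        if 'FORK_TESTS' in spec_args:
            fork_mode.append('tests')
        fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
        if fork_mode:
            return {cls.KEY: ' '.join(fork_mode)}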
-def go_bench(unit, *args):
+@df.with_fields(
+ YTEST_FIELDS_BASE
+ + YTEST_FIELDS_EXTRA
+ + (
+ df.TestName.value2,
+ df.TestData.value,
+ df.Requirements.value,
+ df.TestPartition.value,
+ df.GoBenchTimeout.value,
+ df.ModuleLang.value,
+ )
+)
+def go_bench(fields, unit, *args):
keywords = {
"DEPENDS": -1,
"DATA": -1,
@@ -2060,69 +1365,16 @@ def go_bench(unit, *args):
"FORK_TESTS": 0,
}
flat_args, spec_args = _common.sort_by_keywords(keywords, args)
+ tags = df.Tag.value(unit, flat_args, spec_args)[df.Tag.KEY]
- unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
-
- test_data = sorted(
- _common.filter_out_by_keyword(
- spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'
- )
- )
-
- test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME')
- test_tags = serialize_list(sorted(_get_test_tags(unit, spec_args)))
- test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT')
- test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
-
- fork_mode = []
- if 'FORK_SUBTESTS' in spec_args:
- fork_mode.append('subtests')
- if 'FORK_TESTS' in spec_args:
- fork_mode.append('tests')
- fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
- fork_mode = ' '.join(fork_mode) if fork_mode else ''
-
- unit_path = _common.get_norm_unit_path(unit)
+ if "ya:run_go_benchmark" not in tags:
+ return
- test_record = {
- 'TEST-NAME': flat_args[0],
- 'SCRIPT-REL-PATH': flat_args[1],
- 'TESTED-PROJECT-NAME': unit.name(),
- 'TESTED-PROJECT-FILENAME': unit.filename(),
- 'SOURCE-FOLDER-PATH': unit_path,
- # TODO get rid of BUILD-FOLDER-PATH
- 'BUILD-FOLDER-PATH': unit_path,
- 'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
- 'GLOBAL-LIBRARY-PATH': unit.global_filename(),
- 'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
- 'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
- 'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
- # 'TEST-PRESERVE-ENV': 'da',
- 'TEST-DATA': serialize_list(sorted(test_data)),
- 'TEST-TIMEOUT': test_timeout,
- 'FORK-MODE': fork_mode,
- 'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR'),
- 'SIZE': test_size,
- 'TAG': test_tags,
- 'REQUIREMENTS': serialize_list(test_requirements),
- 'TEST-CWD': unit.get('TEST_CWD_VALUE'),
- 'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
- 'BLOB': unit.get('TEST_BLOB_DATA'),
- 'SKIP_TEST': unit.get('SKIP_TEST_VALUE'),
- 'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE'),
- 'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE'),
- 'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE'),
- 'TEST_PARTITION': unit.get("TEST_PARTITION"),
- 'GO_BENCH_TIMEOUT': unit.get('GO_BENCH_TIMEOUT'),
- 'MODULE_LANG': unit.get("MODULE_LANG").lower() or consts.ModuleLang.UNKNOWN,
- }
+ unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
- if "ya:run_go_benchmark" not in test_record["TAG"]:
- return
- else:
- test_record["TEST-NAME"] += "_bench"
+ dart_record = create_dart_record(fields, unit, flat_args, spec_args)
- data = dump_test(unit, test_record)
+ data = dump_test(unit, dart_record)
if data:
unit.set_property(["DART_DATA", data])
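go_bench is the one plugin whose shape changes: the ya:run_go_benchmark check now runs before the record is built, by calling df.Tag.value directly and substring-matching the serialized tag string just as the deleted branch did, and the old test_record["TEST-NAME"] += "_bench" append is presumably what df.TestName.value2 encodes. A hedged sketch of that pairing; both method bodies are assumptions:

# Assumption: value is the plain TEST-NAME field used by the other plugins,
# and value2 is the go_bench variant that restores the '_bench' suffix the
# deleted code appended after building the record.
class TestName:
    KEY = 'TEST-NAME'

    @classmethod
    def value(cls, unit, flat_args, spec_args):
        return {cls.KEY: flat_args[0]}

    @classmethod
    def value2(cls, unit, flat_args, spec_args):
        return {cls.KEY: flat_args[0] + '_bench'}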