author    bulatman <bulatman@yandex-team.ru>  2022-02-10 16:45:50 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:50 +0300
commit    2f6ca198245aeffd5e2d82b65927c2465b68b4f5 (patch)
tree      9142afc54d335ea52910662635b898e79e192e49 /library/python
parent    6560e4993b14d193f8c879e33a3de5e5eba6e21d (diff)
Restoring authorship annotation for <bulatman@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'library/python')
 library/python/pytest/plugins/fixtures.py                       | 48 ++++----
 library/python/testing/yatest_common/yatest/common/benchmark.py |  6 +--
 2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/library/python/pytest/plugins/fixtures.py b/library/python/pytest/plugins/fixtures.py
index 8035a2bb20..6f7e0a27e4 100644
--- a/library/python/pytest/plugins/fixtures.py
+++ b/library/python/pytest/plugins/fixtures.py
@@ -1,6 +1,6 @@
import os
import pytest
-import six
+import six
MAX_ALLOWED_LINKS_COUNT = 10
@@ -21,35 +21,35 @@ def metrics(request):
@classmethod
def set_benchmark(cls, benchmark_values):
- # report of google has key 'benchmarks' which is a list of benchmark results
- # yandex benchmark has key 'benchmark', which is a list of benchmark results
- # use this to differentiate which kind of result it is
- if 'benchmarks' in benchmark_values:
- cls.set_gbenchmark(benchmark_values)
- else:
- cls.set_ybenchmark(benchmark_values)
-
- @classmethod
- def set_ybenchmark(cls, benchmark_values):
+ # report of google has key 'benchmarks' which is a list of benchmark results
+ # yandex benchmark has key 'benchmark', which is a list of benchmark results
+ # use this to differentiate which kind of result it is
+ if 'benchmarks' in benchmark_values:
+ cls.set_gbenchmark(benchmark_values)
+ else:
+ cls.set_ybenchmark(benchmark_values)
+
+ @classmethod
+ def set_ybenchmark(cls, benchmark_values):
for benchmark in benchmark_values["benchmark"]:
name = benchmark["name"]
for key, value in six.iteritems(benchmark):
if key != "name":
cls.set("{}_{}".format(name, key), value)
- @classmethod
- def set_gbenchmark(cls, benchmark_values):
- time_unit_multipliers = {"ns": 1, "us": 1000, "ms": 1000000}
- time_keys = {"real_time", "cpu_time"}
- ignore_keys = {"name", "run_name", "time_unit", "run_type", "repetition_index"}
- for benchmark in benchmark_values["benchmarks"]:
- name = benchmark["name"].replace('/', '_') # ci does not work properly with '/' in metric name
- time_unit_mult = time_unit_multipliers[benchmark.get("time_unit", "ns")]
- for k, v in six.iteritems(benchmark):
- if k in time_keys:
- cls.set("{}_{}".format(name, k), v * time_unit_mult)
- elif k not in ignore_keys and isinstance(v, (float, int)):
- cls.set("{}_{}".format(name, k), v)
+ @classmethod
+ def set_gbenchmark(cls, benchmark_values):
+ time_unit_multipliers = {"ns": 1, "us": 1000, "ms": 1000000}
+ time_keys = {"real_time", "cpu_time"}
+ ignore_keys = {"name", "run_name", "time_unit", "run_type", "repetition_index"}
+ for benchmark in benchmark_values["benchmarks"]:
+ name = benchmark["name"].replace('/', '_') # ci does not work properly with '/' in metric name
+ time_unit_mult = time_unit_multipliers[benchmark.get("time_unit", "ns")]
+ for k, v in six.iteritems(benchmark):
+ if k in time_keys:
+ cls.set("{}_{}".format(name, k), v * time_unit_mult)
+ elif k not in ignore_keys and isinstance(v, (float, int)):
+ cls.set("{}_{}".format(name, k), v)
return Metrics
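
For context, the two report shapes that set_benchmark dispatches on differ only in their top-level key: Google Benchmark's JSON output uses 'benchmarks', while the Yandex benchmark format uses 'benchmark'. Below is a minimal usage sketch; the sample payloads and values are hypothetical, but the key names and the nanosecond normalization match the code in this hunk, and the sketch assumes the Metrics class returned by the metrics fixture is in scope.

# Hypothetical Yandex-style report: top-level key "benchmark";
# every key except "name" becomes a metric named "<name>_<key>".
ybench_report = {
    "benchmark": [
        {"name": "ParseSmall", "iterations": 1000, "ns_per_iter": 12500},
    ]
}

# Hypothetical Google Benchmark report: top-level key "benchmarks";
# "time_unit" defaults to "ns" when absent.
gbench_report = {
    "benchmarks": [
        {"name": "BM_Parse/64", "run_name": "BM_Parse/64",
         "run_type": "iteration", "repetition_index": 0,
         "time_unit": "us", "iterations": 80000,
         "real_time": 12.5, "cpu_time": 12.3},
    ]
}

Metrics.set_benchmark(ybench_report)
# -> ParseSmall_iterations = 1000, ParseSmall_ns_per_iter = 12500

Metrics.set_benchmark(gbench_report)
# '/' in names is replaced with '_' (CI restriction), bookkeeping keys
# (run_name, run_type, repetition_index, time_unit) are skipped, and the
# two time keys are normalized to nanoseconds via the "us" -> 1000 multiplier:
# -> BM_Parse_64_real_time = 12500.0, BM_Parse_64_cpu_time = 12300.0,
#    BM_Parse_64_iterations = 80000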
diff --git a/library/python/testing/yatest_common/yatest/common/benchmark.py b/library/python/testing/yatest_common/yatest/common/benchmark.py
index 766d3a8968..c3784cbe4c 100644
--- a/library/python/testing/yatest_common/yatest/common/benchmark.py
+++ b/library/python/testing/yatest_common/yatest/common/benchmark.py
@@ -8,12 +8,12 @@ def execute_benchmark(path, budget=None, threads=None):
"""
Run benchmark and return values
:param path: path to benchmark binary
- :param budget: time budget, sec (supported only by ybenchmark)
- :param threads: number of threads to run benchmark (supported only by ybenchmark)
+ :param budget: time budget, sec (supported only by ybenchmark)
+ :param threads: number of threads to run benchmark (supported only by ybenchmark)
:return: map of benchmark values
"""
benchmark_path = runtime.binary_path(path)
- cmd = [benchmark_path, "--benchmark_format=json"]
+ cmd = [benchmark_path, "--benchmark_format=json"]
if budget is not None:
cmd += ["-b", str(budget)]
if threads is not None:
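
The hunk above only reindents the docstring and command line, but it shows the intended flow: execute_benchmark resolves the binary via runtime.binary_path, forces --benchmark_format=json, appends the ybenchmark-only budget/threads flags when given, and returns the parsed JSON report. A hedged sketch of how this composes with the metrics fixture from the first file; the test body and binary path are hypothetical, and it assumes execute_benchmark is re-exported from yatest.common as is conventional for this library.

import yatest.common as yc

def test_parse_benchmark(metrics):
    # Run the benchmark binary with a 10-second budget (ybenchmark only)
    # and get back the parsed JSON report.
    values = yc.execute_benchmark("contrib/bench/bench", budget=10)
    # set_benchmark dispatches on 'benchmarks' vs 'benchmark'
    # and records one metric per reported value.
    metrics.set_benchmark(values)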