author     robot-piglet <robot-piglet@yandex-team.com>  2024-05-24 12:52:43 +0300
committer  robot-piglet <robot-piglet@yandex-team.com>  2024-05-24 13:00:05 +0300
commit     17b724c0bfc83bc671073210ba805e7e833f29d8 (patch)
tree       b671cf351fec7970a12ca91d7d98b507eaf91c86
parent     d2e0d3650d4af88fe27cbe7a6b2bc1a44f10694b (diff)
download   ydb-17b724c0bfc83bc671073210ba805e7e833f29d8.tar.gz
Intermediate changes
28 files changed, 1667 insertions, 980 deletions
diff --git a/contrib/libs/cxxsupp/libcxxmsvc/include/__config b/contrib/libs/cxxsupp/libcxxmsvc/include/__config
index 83c45fbe43..bcb49f7720 100644
--- a/contrib/libs/cxxsupp/libcxxmsvc/include/__config
+++ b/contrib/libs/cxxsupp/libcxxmsvc/include/__config
@@ -34,7 +34,7 @@
 #elif defined(_MSC_VER)
 #define _LIBCPP_COMPILER_MSVC
-#if _MSVC_LANG == 201705L
+#if _MSVC_LANG >= 201705L
 #  define _LIBCPP_STD_VER 20
 #elif _MSVC_LANG == 201703L
 #  define _LIBCPP_STD_VER 17
diff --git a/contrib/restricted/google/benchmark/AUTHORS b/contrib/restricted/google/benchmark/AUTHORS
index d08c1fdb87..2170e46fd4 100644
--- a/contrib/restricted/google/benchmark/AUTHORS
+++ b/contrib/restricted/google/benchmark/AUTHORS
@@ -31,6 +31,7 @@ Evgeny Safronov <division494@gmail.com>
 Fabien Pichot <pichot.fabien@gmail.com>
 Federico Ficarelli <federico.ficarelli@gmail.com>
 Felix Homann <linuxaudio@showlabor.de>
+Gergely Meszaros <maetveis@gmail.com>
 Gergő Szitár <szitar.gergo@gmail.com>
 Google Inc.
 Henrique Bucher <hbucher@gmail.com>
diff --git a/contrib/restricted/google/benchmark/include/benchmark/benchmark.h b/contrib/restricted/google/benchmark/include/benchmark/benchmark.h
index d53d761ad8..869b32e378 100644
--- a/contrib/restricted/google/benchmark/include/benchmark/benchmark.h
+++ b/contrib/restricted/google/benchmark/include/benchmark/benchmark.h
@@ -302,6 +302,9 @@ class BenchmarkReporter;
 // Default number of minimum benchmark running time in seconds.
 const char kDefaultMinTimeStr[] = "0.5s";
 
+// Returns the version of the library.
+BENCHMARK_EXPORT std::string GetBenchmarkVersion();
+
 BENCHMARK_EXPORT void PrintDefaultHelp();
 
 BENCHMARK_EXPORT void Initialize(int* argc, char** argv,
@@ -341,7 +344,7 @@ BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter();
 // The second and third overload use the specified 'display_reporter' and
 // 'file_reporter' respectively. 'file_reporter' will write to the file
 // specified
-// by '--benchmark_output'. If '--benchmark_output' is not given the
+// by '--benchmark_out'. If '--benchmark_out' is not given the
 // 'file_reporter' is ignored.
 //
 // RETURNS: The number of matching benchmarks.
@@ -584,6 +587,12 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
 #endif
 #else
+#ifdef BENCHMARK_HAS_CXX11
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) {
+  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+}
+#else
 template <class Tp>
 BENCHMARK_DEPRECATED_MSG(
     "The const-ref version of this method can permit "
@@ -591,6 +600,12 @@ BENCHMARK_DEPRECATED_MSG(
 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
   internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
 }
+
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
+  internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+}
+#endif
 
 // FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11.
 #endif
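
Note on the DoNotOptimize() changes above: with C++11 the forwarding overload accepts rvalues as well as lvalues. A minimal usage sketch (BM_Accumulate is a hypothetical benchmark, not part of this change):

#include <benchmark/benchmark.h>

// Feeding a value to DoNotOptimize() marks it as escaped, so the compiler
// cannot delete the loop body as dead code.
static void BM_Accumulate(benchmark::State& state) {
  for (auto _ : state) {
    long sum = 0;
    for (long i = 0; i < 1000; ++i) sum += i;
    benchmark::DoNotOptimize(sum);  // lvalues and, with C++11, rvalues bind
  }
}
BENCHMARK(BM_Accumulate);
BENCHMARK_MAIN();
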
@@ -660,13 +675,15 @@ typedef std::map<std::string, Counter> UserCounters;
 // calculated automatically to the best fit.
 enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
 
+typedef int64_t ComplexityN;
+
 typedef int64_t IterationCount;
 
 enum StatisticUnit { kTime, kPercentage };
 
 // BigOFunc is passed to a benchmark in order to specify the asymptotic
 // computational complexity for the benchmark.
-typedef double(BigOFunc)(IterationCount);
+typedef double(BigOFunc)(ComplexityN);
 
 // StatisticsFunc is passed to a benchmark in order to compute some descriptive
 // statistics over all the measurements of some type
@@ -734,13 +751,13 @@ class BENCHMARK_EXPORT State {
   // have been called previously.
   //
   // NOTE: KeepRunning may not be used after calling either of these functions.
-  BENCHMARK_ALWAYS_INLINE StateIterator begin();
-  BENCHMARK_ALWAYS_INLINE StateIterator end();
+  inline BENCHMARK_ALWAYS_INLINE StateIterator begin();
+  inline BENCHMARK_ALWAYS_INLINE StateIterator end();
 
   // Returns true if the benchmark should continue through another iteration.
   // NOTE: A benchmark may not return from the test until KeepRunning() has
   // returned false.
-  bool KeepRunning();
+  inline bool KeepRunning();
 
   // Returns true iff the benchmark should run n more iterations.
   // REQUIRES: 'n' > 0.
@@ -752,7 +769,7 @@ class BENCHMARK_EXPORT State {
   //   while (state.KeepRunningBatch(1000)) {
   //     // process 1000 elements
   //   }
-  bool KeepRunningBatch(IterationCount n);
+  inline bool KeepRunningBatch(IterationCount n);
 
   // REQUIRES: timer is running and 'SkipWithMessage(...)' or
   // 'SkipWithError(...)' has not been called by the current thread.
@@ -863,10 +880,12 @@ class BENCHMARK_EXPORT State {
   // and complexity_n will
   // represent the length of N.
   BENCHMARK_ALWAYS_INLINE
-  void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
+  void SetComplexityN(ComplexityN complexity_n) {
+    complexity_n_ = complexity_n;
+  }
 
   BENCHMARK_ALWAYS_INLINE
-  int64_t complexity_length_n() const { return complexity_n_; }
+  ComplexityN complexity_length_n() const { return complexity_n_; }
 
   // If this routine is called with items > 0, then an items/s
   // label is printed on the benchmark report line for the currently
@@ -955,7 +974,7 @@ class BENCHMARK_EXPORT State {
   // items we don't need on the first cache line
   std::vector<int64_t> range_;
 
-  int64_t complexity_n_;
+  ComplexityN complexity_n_;
 
  public:
   // Container for user-defined counters.
@@ -970,7 +989,7 @@ class BENCHMARK_EXPORT State {
   void StartKeepRunning();
   // Implementation of KeepRunning() and KeepRunningBatch().
   // is_batch must be true unless n is 1.
-  bool KeepRunningInternal(IterationCount n, bool is_batch);
+  inline bool KeepRunningInternal(IterationCount n, bool is_batch);
   void FinishKeepRunning();
 
   const std::string name_;
@@ -1504,7 +1523,7 @@ class Fixture : public internal::Benchmark {
 //   /* Registers a benchmark named "BM_takes_args/int_string_test` */
 //   BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
 #define BENCHMARK_CAPTURE(func, test_case_name, ...)     \
-  BENCHMARK_PRIVATE_DECLARE(func) =                      \
+  BENCHMARK_PRIVATE_DECLARE(_benchmark_) =               \
       (::benchmark::internal::RegisterBenchmarkInternal( \
           new ::benchmark::internal::FunctionBenchmark(  \
              #func "/" #test_case_name,                  \
@@ -1541,6 +1560,31 @@ class Fixture : public internal::Benchmark {
 #define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
 #endif
 
+#ifdef BENCHMARK_HAS_CXX11
+// This will register a benchmark for a templatized function,
+// with the additional arguments specified by `...`.
+//
+// For example:
+//
+// template <typename T, class ...ExtraArgs>`
+// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+//  [...]
+//}
+// /* Registers a benchmark named "BM_takes_args<void>/int_string_test` */
+// BENCHMARK_TEMPLATE1_CAPTURE(BM_takes_args, void, int_string_test, 42,
+//                             std::string("abc"));
+#define BENCHMARK_TEMPLATE1_CAPTURE(func, a, test_case_name, ...) \
+  BENCHMARK_CAPTURE(func<a>, test_case_name, __VA_ARGS__)
+
+#define BENCHMARK_TEMPLATE2_CAPTURE(func, a, b, test_case_name, ...) \
+  BENCHMARK_PRIVATE_DECLARE(func) =                                  \
+      (::benchmark::internal::RegisterBenchmarkInternal(             \
+          new ::benchmark::internal::FunctionBenchmark(              \
+              #func "<" #a "," #b ">"                                \
+                    "/" #test_case_name,                             \
+              [](::benchmark::State& st) { func<a, b>(st, __VA_ARGS__); })))
+#endif  // BENCHMARK_HAS_CXX11
+
 #define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method)        \
   class BaseClass##_##Method##_Benchmark : public BaseClass { \
    public:                                                    \
@@ -1748,6 +1792,7 @@ class BENCHMARK_EXPORT BenchmarkReporter {
           real_accumulated_time(0),
           cpu_accumulated_time(0),
           max_heapbytes_used(0),
+          use_real_time_for_initial_big_o(false),
           complexity(oNone),
           complexity_lambda(),
           complexity_n(0),
@@ -1793,10 +1838,14 @@ class BENCHMARK_EXPORT BenchmarkReporter {
     // This is set to 0.0 if memory tracing is not enabled.
     double max_heapbytes_used;
 
+    // By default Big-O is computed for CPU time, but that is not what you want
+    // to happen when manual time was requested, which is stored as real time.
+    bool use_real_time_for_initial_big_o;
+
     // Keep track of arguments to compute asymptotic complexity
     BigO complexity;
     BigOFunc* complexity_lambda;
-    int64_t complexity_n;
+    ComplexityN complexity_n;
 
     // what statistics to compute from the measurements
     const std::vector<internal::Statistics>* statistics;
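
The new BENCHMARK_TEMPLATE1_CAPTURE macro simply combines BENCHMARK_TEMPLATE1 and BENCHMARK_CAPTURE, as its definition above shows. A usage sketch (BM_Fill is a hypothetical benchmark, not part of this change):

#include <benchmark/benchmark.h>
#include <vector>

template <typename T>
static void BM_Fill(benchmark::State& state, T init) {
  for (auto _ : state) {
    std::vector<T> v(1024, init);
    benchmark::DoNotOptimize(v.data());
  }
}
// Registers a benchmark named "BM_Fill<int>/zero".
BENCHMARK_TEMPLATE1_CAPTURE(BM_Fill, int, zero, 0);
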
diff --git a/contrib/restricted/google/benchmark/src/benchmark.cc b/contrib/restricted/google/benchmark/src/benchmark.cc
index 6139e59d05..337bb3faa7 100644
--- a/contrib/restricted/google/benchmark/src/benchmark.cc
+++ b/contrib/restricted/google/benchmark/src/benchmark.cc
@@ -152,8 +152,16 @@ BENCHMARK_EXPORT std::map<std::string, std::string>*& GetGlobalContext() {
   return global_context;
 }
 
-// FIXME: wouldn't LTO mess this up?
-void UseCharPointer(char const volatile*) {}
+static void const volatile* volatile global_force_escape_pointer;
+
+// FIXME: Verify if LTO still messes this up?
+void UseCharPointer(char const volatile* const v) {
+  // We want to escape the pointer `v` so that the compiler can not eliminate
+  // computations that produced it. To do that, we escape the pointer by storing
+  // it into a volatile variable, since generally, volatile store, is not
+  // something the compiler is allowed to elide.
+  global_force_escape_pointer = reinterpret_cast<void const volatile*>(v);
+}
 
 }  // namespace internal
 
@@ -399,7 +407,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
     benchmarks_with_threads += (benchmark.threads() > 1);
     runners.emplace_back(benchmark, &perfcounters, reports_for_family);
     int num_repeats_of_this_instance = runners.back().GetNumRepeats();
-    num_repetitions_total += num_repeats_of_this_instance;
+    num_repetitions_total +=
+        static_cast<size_t>(num_repeats_of_this_instance);
     if (reports_for_family)
       reports_for_family->num_runs_total += num_repeats_of_this_instance;
   }
@@ -577,12 +586,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
     Err << "A custom file reporter was provided but "
            "--benchmark_out=<file> was not specified."
        << std::endl;
+    Out.flush();
+    Err.flush();
     std::exit(1);
   }
 
   if (!fname.empty()) {
     output_file.open(fname);
     if (!output_file.is_open()) {
       Err << "invalid file name: '" << fname << "'" << std::endl;
+      Out.flush();
+      Err.flush();
       std::exit(1);
     }
     if (!file_reporter) {
@@ -597,10 +610,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
   }
 
   std::vector<internal::BenchmarkInstance> benchmarks;
-  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) {
+    Out.flush();
+    Err.flush();
+    return 0;
+  }
 
   if (benchmarks.empty()) {
     Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+    Out.flush();
+    Err.flush();
     return 0;
   }
 
@@ -611,6 +630,8 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
     internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
   }
 
+  Out.flush();
+  Err.flush();
   return benchmarks.size();
 }
 
@@ -736,6 +757,14 @@ int InitializeStreams() {
 
 }  // end namespace internal
 
+std::string GetBenchmarkVersion() {
+#ifdef BENCHMARK_VERSION
+  return {BENCHMARK_VERSION};
+#else
+  return {""};
+#endif
+}
+
 void PrintDefaultHelp() {
   fprintf(stdout,
           "benchmark"
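
A sketch of the new GetBenchmarkVersion() accessor in use (the custom main() is an assumption; the stock BENCHMARK_MAIN() does not print the version). As the #ifdef above shows, it returns an empty string when BENCHMARK_VERSION was not defined at build time:

#include <benchmark/benchmark.h>
#include <cstdio>

int main(int argc, char** argv) {
  std::printf("benchmark library version: %s\n",
              benchmark::GetBenchmarkVersion().c_str());
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  benchmark::Shutdown();
  return 0;
}
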
diff --git a/contrib/restricted/google/benchmark/src/benchmark_register.cc b/contrib/restricted/google/benchmark/src/benchmark_register.cc
index e447c9a2d3..8ade048225 100644
--- a/contrib/restricted/google/benchmark/src/benchmark_register.cc
+++ b/contrib/restricted/google/benchmark/src/benchmark_register.cc
@@ -482,8 +482,9 @@ int Benchmark::ArgsCnt() const {
 
 const char* Benchmark::GetArgName(int arg) const {
   BM_CHECK_GE(arg, 0);
-  BM_CHECK_LT(arg, static_cast<int>(arg_names_.size()));
-  return arg_names_[arg].c_str();
+  size_t uarg = static_cast<size_t>(arg);
+  BM_CHECK_LT(uarg, arg_names_.size());
+  return arg_names_[uarg].c_str();
 }
 
 TimeUnit Benchmark::GetTimeUnit() const {
diff --git a/contrib/restricted/google/benchmark/src/benchmark_register.h b/contrib/restricted/google/benchmark/src/benchmark_register.h
index 53367c707c..be50265f72 100644
--- a/contrib/restricted/google/benchmark/src/benchmark_register.h
+++ b/contrib/restricted/google/benchmark/src/benchmark_register.h
@@ -24,7 +24,7 @@ typename std::vector<T>::iterator AddPowers(std::vector<T>* dst, T lo, T hi,
   static const T kmax = std::numeric_limits<T>::max();
 
   // Space out the values in multiples of "mult"
-  for (T i = static_cast<T>(1); i <= hi; i *= static_cast<T>(mult)) {
+  for (T i = static_cast<T>(1); i <= hi; i = static_cast<T>(i * mult)) {
     if (i >= lo) {
       dst->push_back(i);
     }
@@ -52,7 +52,7 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
 
   const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
 
-  std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+  std::for_each(it, dst->end(), [](T& t) { t = static_cast<T>(t * -1); });
   std::reverse(it, dst->end());
 }
 
diff --git a/contrib/restricted/google/benchmark/src/benchmark_runner.cc b/contrib/restricted/google/benchmark/src/benchmark_runner.cc
index 5f683fe423..5714587196 100644
--- a/contrib/restricted/google/benchmark/src/benchmark_runner.cc
+++ b/contrib/restricted/google/benchmark/src/benchmark_runner.cc
@@ -64,7 +64,7 @@ MemoryManager* memory_manager = nullptr;
 
 namespace {
 
-static constexpr IterationCount kMaxIterations = 1000000000;
+static constexpr IterationCount kMaxIterations = 1000000000000;
 const double kDefaultMinTime =
     std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);
 
@@ -97,6 +97,7 @@ BenchmarkReporter::Run CreateRunReport(
   } else {
     report.real_accumulated_time = results.real_time_used;
   }
+  report.use_real_time_for_initial_big_o = b.use_manual_time();
   report.cpu_accumulated_time = results.cpu_time_used;
   report.complexity_n = results.complexity_n;
   report.complexity = b.complexity();
@@ -109,7 +110,7 @@ BenchmarkReporter::Run CreateRunReport(
     report.memory_result = memory_result;
     report.allocs_per_iter =
         memory_iterations ? static_cast<double>(memory_result->num_allocs) /
-                                memory_iterations
+                                static_cast<double>(memory_iterations)
                           : 0;
   }
 
@@ -235,7 +236,7 @@ BenchmarkRunner::BenchmarkRunner(
       has_explicit_iteration_count(b.iterations() != 0 ||
                                    parsed_benchtime_flag.tag ==
                                        BenchTimeType::ITERS),
-      pool(b.threads() - 1),
+      pool(static_cast<size_t>(b.threads() - 1)),
       iters(has_explicit_iteration_count
                 ? ComputeIters(b_, parsed_benchtime_flag)
                 : 1),
@@ -327,8 +328,8 @@ IterationCount BenchmarkRunner::PredictNumItersNeeded(
   // So what seems to be the sufficiently-large iteration count? Round up.
   const IterationCount max_next_iters = static_cast<IterationCount>(
-      std::lround(std::max(multiplier * static_cast<double>(i.iters),
-                           static_cast<double>(i.iters) + 1.0)));
+      std::llround(std::max(multiplier * static_cast<double>(i.iters),
+                            static_cast<double>(i.iters) + 1.0)));
   // But we do have *some* limits though..
   const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
 
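
The AddPowers()/AddNegatedPowers() changes above only tighten integer conversions; the user-facing API they back is Range()/RangeMultiplier(). A hypothetical example (BM_Copy is not part of this change):

#include <benchmark/benchmark.h>
#include <cstring>
#include <vector>

// RangeMultiplier(4) + Range(8, 8 << 10) expands via AddPowers() into the
// argument sequence 8, 32, 128, 512, 2048, 8192.
static void BM_Copy(benchmark::State& state) {
  const size_t size = static_cast<size_t>(state.range(0));
  std::vector<char> src(size, 'x'), dst(size);
  for (auto _ : state) {
    std::memcpy(dst.data(), src.data(), size);
    benchmark::DoNotOptimize(dst.data());
  }
}
BENCHMARK(BM_Copy)->RangeMultiplier(4)->Range(8, 8 << 10);
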
diff --git a/contrib/restricted/google/benchmark/src/colorprint.cc b/contrib/restricted/google/benchmark/src/colorprint.cc
index 0bfd67041d..abc71492f7 100644
--- a/contrib/restricted/google/benchmark/src/colorprint.cc
+++ b/contrib/restricted/google/benchmark/src/colorprint.cc
@@ -140,12 +140,12 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
   // We need to flush the stream buffers into the console before each
   // SetConsoleTextAttribute call lest it affect the text that is already
   // printed but has not yet reached the console.
-  fflush(stdout);
+  out.flush();
   SetConsoleTextAttribute(stdout_handle,
                           GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
-  vprintf(fmt, args);
+  out << FormatString(fmt, args);
 
-  fflush(stdout);
+  out.flush();
   // Restores the text color.
   SetConsoleTextAttribute(stdout_handle, old_color_attrs);
 #else
diff --git a/contrib/restricted/google/benchmark/src/complexity.cc b/contrib/restricted/google/benchmark/src/complexity.cc
index 825c57394a..eee3122646 100644
--- a/contrib/restricted/google/benchmark/src/complexity.cc
+++ b/contrib/restricted/google/benchmark/src/complexity.cc
@@ -37,12 +37,14 @@ BigOFunc* FittingCurve(BigO complexity) {
       return [](IterationCount n) -> double { return std::pow(n, 3); };
     case oLogN:
       /* Note: can't use log2 because Android's GNU STL lacks it */
-      return
-          [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
+      return [](IterationCount n) {
+        return kLog2E * std::log(static_cast<double>(n));
+      };
     case oNLogN:
       /* Note: can't use log2 because Android's GNU STL lacks it */
       return [](IterationCount n) {
-        return kLog2E * n * log(static_cast<double>(n));
+        return kLog2E * static_cast<double>(n) *
+               std::log(static_cast<double>(n));
       };
     case o1:
     default:
@@ -75,12 +77,12 @@ std::string GetBigOString(BigO complexity) {
 //   given by the lambda expression.
 //   - n             : Vector containing the size of the benchmark tests.
 //   - time          : Vector containing the times for the benchmark tests.
-//   - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
+//   - fitting_curve : lambda expression (e.g. [](ComplexityN n) {return n; };).
 
 // For a deeper explanation on the algorithm logic, please refer to
 // https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
 
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
                        const std::vector<double>& time,
                        BigOFunc* fitting_curve) {
   double sigma_gn_squared = 0.0;
@@ -105,12 +107,12 @@ LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
   double rms = 0.0;
   for (size_t i = 0; i < n.size(); ++i) {
     double fit = result.coef * fitting_curve(n[i]);
-    rms += pow((time[i] - fit), 2);
+    rms += std::pow((time[i] - fit), 2);
   }
 
   // Normalized RMS by the mean of the observed values
-  double mean = sigma_time / n.size();
-  result.rms = sqrt(rms / n.size()) / mean;
+  double mean = sigma_time / static_cast<double>(n.size());
+  result.rms = std::sqrt(rms / static_cast<double>(n.size())) / mean;
 
   return result;
 }
@@ -122,7 +124,7 @@ LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
 //   - complexity : If different than oAuto, the fitting curve will stick to
 //                  this one. If it is oAuto, it will be calculated the best
 //                  fitting curve.
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
                        const std::vector<double>& time, const BigO complexity) {
   BM_CHECK_EQ(n.size(), time.size());
   BM_CHECK_GE(n.size(), 2);  // Do not compute fitting curve is less than two
@@ -162,7 +164,7 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   if (reports.size() < 2) return results;
 
   // Accumulators.
-  std::vector<int64_t> n;
+  std::vector<ComplexityN> n;
   std::vector<double> real_time;
   std::vector<double> cpu_time;
 
@@ -171,8 +173,10 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     BM_CHECK_GT(run.complexity_n, 0)
         << "Did you forget to call SetComplexityN?";
     n.push_back(run.complexity_n);
-    real_time.push_back(run.real_accumulated_time / run.iterations);
-    cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+    real_time.push_back(run.real_accumulated_time /
+                        static_cast<double>(run.iterations));
+    cpu_time.push_back(run.cpu_accumulated_time /
+                       static_cast<double>(run.iterations));
   }
 
   LeastSq result_cpu;
@@ -182,8 +186,19 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
     result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
   } else {
-    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
-    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+    const BigO* InitialBigO = &reports[0].complexity;
+    const bool use_real_time_for_initial_big_o =
+        reports[0].use_real_time_for_initial_big_o;
+    if (use_real_time_for_initial_big_o) {
+      result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+      InitialBigO = &result_real.complexity;
+      // The Big-O complexity for CPU time must have the same Big-O function!
+    }
+    result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO);
+    InitialBigO = &result_cpu.complexity;
+    if (!use_real_time_for_initial_big_o) {
+      result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+    }
   }
 
   // Drop the 'args' when reporting complexity.
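
For reference, the user-facing complexity API that ComputeBigO() above fits (BM_Sort is a hypothetical example, not part of this change). SetComplexityN() now takes the new ComplexityN typedef, and oNLogN selects the N*lg(N) fitting curve:

#include <benchmark/benchmark.h>
#include <algorithm>
#include <cstdlib>
#include <vector>

static void BM_Sort(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)));
  for (auto _ : state) {
    state.PauseTiming();
    std::generate(v.begin(), v.end(), std::rand);
    state.ResumeTiming();
    std::sort(v.begin(), v.end());
  }
  // The N used in the least-squares fit performed by MinimalLeastSq().
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Sort)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity(benchmark::oNLogN);
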
diff --git a/contrib/restricted/google/benchmark/src/console_reporter.cc b/contrib/restricted/google/benchmark/src/console_reporter.cc
index 10e05e133e..35c3de2a4d 100644
--- a/contrib/restricted/google/benchmark/src/console_reporter.cc
+++ b/contrib/restricted/google/benchmark/src/console_reporter.cc
@@ -42,11 +42,15 @@ bool ConsoleReporter::ReportContext(const Context& context) {
   PrintBasicContext(&GetErrorStream(), context);
 
 #ifdef BENCHMARK_OS_WINDOWS
-  if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
-    GetErrorStream()
-        << "Color printing is only supported for stdout on windows."
-           " Disabling color printing\n";
-    output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
+  if ((output_options_ & OO_Color)) {
+    auto stdOutBuf = std::cout.rdbuf();
+    auto outStreamBuf = GetOutputStream().rdbuf();
+    if (stdOutBuf != outStreamBuf) {
+      GetErrorStream()
+          << "Color printing is only supported for stdout on windows."
+             " Disabling color printing\n";
+      output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
+    }
   }
 #endif
 
diff --git a/contrib/restricted/google/benchmark/src/counter.cc b/contrib/restricted/google/benchmark/src/counter.cc
index cf5b78ee3a..aa14cd8092 100644
--- a/contrib/restricted/google/benchmark/src/counter.cc
+++ b/contrib/restricted/google/benchmark/src/counter.cc
@@ -27,10 +27,10 @@ double Finish(Counter const& c, IterationCount iterations, double cpu_time,
     v /= num_threads;
   }
   if (c.flags & Counter::kIsIterationInvariant) {
-    v *= iterations;
+    v *= static_cast<double>(iterations);
   }
   if (c.flags & Counter::kAvgIterations) {
-    v /= iterations;
+    v /= static_cast<double>(iterations);
   }
 
   if (c.flags & Counter::kInvert) {  // Invert is *always* last.
diff --git a/contrib/restricted/google/benchmark/src/csv_reporter.cc b/contrib/restricted/google/benchmark/src/csv_reporter.cc
index 7b56da107e..4b39e2c52f 100644
--- a/contrib/restricted/google/benchmark/src/csv_reporter.cc
+++ b/contrib/restricted/google/benchmark/src/csv_reporter.cc
@@ -122,13 +122,21 @@ void CSVReporter::PrintRunData(const Run& run) {
   }
   Out << ",";
 
-  Out << run.GetAdjustedRealTime() << ",";
-  Out << run.GetAdjustedCPUTime() << ",";
+  if (run.run_type != Run::RT_Aggregate ||
+      run.aggregate_unit == StatisticUnit::kTime) {
+    Out << run.GetAdjustedRealTime() << ",";
+    Out << run.GetAdjustedCPUTime() << ",";
+  } else {
+    assert(run.aggregate_unit == StatisticUnit::kPercentage);
+    Out << run.real_accumulated_time << ",";
+    Out << run.cpu_accumulated_time << ",";
+  }
 
   // Do not print timeLabel on bigO and RMS report
   if (run.report_big_o) {
     Out << GetBigOString(run.complexity);
-  } else if (!run.report_rms) {
+  } else if (!run.report_rms &&
+             run.aggregate_unit != StatisticUnit::kPercentage) {
     Out << GetTimeUnitString(run.time_unit);
   }
   Out << ",";
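
The casts added in counter.cc affect exactly the per-iteration counter normalizations. A hypothetical sketch of the flags Finish() applies (BM_Process is not part of this change):

#include <benchmark/benchmark.h>
#include <cstdint>

static void BM_Process(benchmark::State& state) {
  int64_t bytes = 0;
  for (auto _ : state) {
    bytes += 4096;  // stand-in for real work
    benchmark::DoNotOptimize(bytes);
  }
  // kAvgIterations divides the reported value by the iteration count,
  // i.e. the "v /= static_cast<double>(iterations)" branch above.
  state.counters["BytesPerIter"] = benchmark::Counter(
      static_cast<double>(bytes), benchmark::Counter::kAvgIterations);
}
BENCHMARK(BM_Process);
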
diff --git a/contrib/restricted/google/benchmark/src/cycleclock.h b/contrib/restricted/google/benchmark/src/cycleclock.h
index 1295880b2e..36aa8e3c76 100644
--- a/contrib/restricted/google/benchmark/src/cycleclock.h
+++ b/contrib/restricted/google/benchmark/src/cycleclock.h
@@ -70,7 +70,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   // frequency scaling).  Also note that when the Mac sleeps, this
   // counter pauses; it does not continue counting, nor does it
   // reset to zero.
-  return mach_absolute_time();
+  return static_cast<int64_t>(mach_absolute_time());
 #elif defined(BENCHMARK_OS_EMSCRIPTEN)
   // this goes above x86-specific code because old versions of Emscripten
   // define __x86_64__, although they have nothing to do with it.
@@ -82,7 +82,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
 #elif defined(__x86_64__) || defined(__amd64__)
   uint64_t low, high;
   __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
-  return (high << 32) | low;
+  return static_cast<int64_t>((high << 32) | low);
 #elif defined(__powerpc__) || defined(__ppc__)
   // This returns a time-base, which is not always precisely a cycle-count.
 #if defined(__powerpc64__) || defined(__ppc64__)
@@ -181,23 +181,25 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
 #elif defined(__s390__)  // Covers both s390 and s390x.
   // Return the CPU clock.
   uint64_t tsc;
-#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
-  // z/OS XL compiler HLASM syntax.
+#if defined(BENCHMARK_OS_ZOS)
+  // z/OS HLASM syntax.
   asm(" stck %0" : "=m"(tsc) : : "cc");
 #else
+  // Linux on Z syntax.
   asm("stck %0" : "=Q"(tsc) : : "cc");
 #endif
   return tsc;
 #elif defined(__riscv)  // RISC-V
-  // Use RDCYCLE (and RDCYCLEH on riscv32)
+  // Use RDTIME (and RDTIMEH on riscv32).
+  // RDCYCLE is a privileged instruction since Linux 6.6.
 #if __riscv_xlen == 32
   uint32_t cycles_lo, cycles_hi0, cycles_hi1;
   // This asm also includes the PowerPC overflow handling strategy, as above.
   // Implemented in assembly because Clang insisted on branching.
   asm volatile(
-      "rdcycleh %0\n"
-      "rdcycle %1\n"
-      "rdcycleh %2\n"
+      "rdtimeh %0\n"
+      "rdtime %1\n"
+      "rdtimeh %2\n"
       "sub %0, %0, %2\n"
       "seqz %0, %0\n"
       "sub %0, zero, %0\n"
@@ -206,7 +208,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
 #else
   uint64_t cycles;
-  asm volatile("rdcycle %0" : "=r"(cycles));
+  asm volatile("rdtime %0" : "=r"(cycles));
   return cycles;
 #endif
 #elif defined(__e2k__) || defined(__elbrus__)
@@ -217,10 +219,20 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   uint64_t pcycle;
   asm volatile("%0 = C15:14" : "=r"(pcycle));
   return static_cast<double>(pcycle);
+#elif defined(__alpha__)
+  // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit
+  // integer and thus wraps every ~4s, making using it for tick counts
+  // unreliable beyond this time range. The real-time clock is low-precision,
+  // roughtly ~1ms, but it is the only option that can reasonable count
+  // indefinitely.
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
 #else
-// The soft failover to a generic implementation is automatic only for ARM.
-// For other platforms the developer is expected to make an attempt to create
-// a fast implementation and use generic version if nothing better is available.
+  // The soft failover to a generic implementation is automatic only for ARM.
+  // For other platforms the developer is expected to make an attempt to
+  // create a fast implementation and use generic version if nothing better
+  // is available.
 #error You need to define CycleTimer for your OS and CPU
 #endif
 }
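
The new __alpha__ branch above falls back to the real-time clock when the hardware cycle counter is unusable. A portable sketch of the same idea using std::chrono (an assumption for illustration, not the library's code):

#include <chrono>
#include <cstdint>

// Derive a monotonically increasing microsecond tick count from a clock
// when no cheap, wide-enough cycle counter is available; steady_clock
// stands in for gettimeofday().
inline int64_t FallbackTickCount() {
  using namespace std::chrono;
  return duration_cast<microseconds>(steady_clock::now().time_since_epoch())
      .count();
}
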
diff --git a/contrib/restricted/google/benchmark/src/internal_macros.h b/contrib/restricted/google/benchmark/src/internal_macros.h
index 8dd7d0c650..f4894ba8e6 100644
--- a/contrib/restricted/google/benchmark/src/internal_macros.h
+++ b/contrib/restricted/google/benchmark/src/internal_macros.h
@@ -11,11 +11,7 @@
 #endif
 
 #if defined(__clang__)
-  #if defined(__ibmxl__)
-    #if !defined(COMPILER_IBMXL)
-      #define COMPILER_IBMXL
-    #endif
-  #elif !defined(COMPILER_CLANG)
+  #if !defined(COMPILER_CLANG)
     #define COMPILER_CLANG
   #endif
 #elif defined(_MSC_VER)
diff --git a/contrib/restricted/google/benchmark/src/json_reporter.cc b/contrib/restricted/google/benchmark/src/json_reporter.cc
index 6559dfd5e6..b8c8c94c08 100644
--- a/contrib/restricted/google/benchmark/src/json_reporter.cc
+++ b/contrib/restricted/google/benchmark/src/json_reporter.cc
@@ -167,12 +167,19 @@ bool JSONReporter::ReportContext(const Context& context) {
     }
     out << "],\n";
 
+    out << indent << FormatKV("library_version", GetBenchmarkVersion());
+    out << ",\n";
+
 #if defined(NDEBUG)
     const char build_type[] = "release";
 #else
     const char build_type[] = "debug";
 #endif
     out << indent << FormatKV("library_build_type", build_type);
+    out << ",\n";
+
+    // NOTE: our json schema is not strictly tied to the library version!
+    out << indent << FormatKV("json_schema_version", int64_t(1));
 
     std::map<std::string, std::string>* global_context =
         internal::GetGlobalContext();
diff --git a/contrib/restricted/google/benchmark/src/perf_counters.cc b/contrib/restricted/google/benchmark/src/perf_counters.cc
index 9f564b46bb..3b204fd1cd 100644
--- a/contrib/restricted/google/benchmark/src/perf_counters.cc
+++ b/contrib/restricted/google/benchmark/src/perf_counters.cc
@@ -39,7 +39,8 @@ size_t PerfCounterValues::Read(const std::vector<int>& leaders) {
     auto read_bytes = ::read(lead, ptr, size);
     if (read_bytes >= ssize_t(sizeof(uint64_t))) {
       // Actual data bytes are all bytes minus initial padding
-      std::size_t data_bytes = read_bytes - sizeof(uint64_t);
+      std::size_t data_bytes =
+          static_cast<std::size_t>(read_bytes) - sizeof(uint64_t);
       // This should be very cheap since it's in hot cache
       std::memmove(ptr, ptr + sizeof(uint64_t), data_bytes);
       // Increment our counters
@@ -254,7 +255,7 @@ bool PerfCounters::IsCounterSupported(const std::string&) { return false; }
 PerfCounters PerfCounters::Create(
     const std::vector<std::string>& counter_names) {
   if (!counter_names.empty()) {
-    GetErrorLogInstance() << "Performance counters not supported.";
+    GetErrorLogInstance() << "Performance counters not supported.\n";
   }
   return NoCounters();
 }
diff --git a/contrib/restricted/google/benchmark/src/statistics.cc b/contrib/restricted/google/benchmark/src/statistics.cc
index 844e926895..16b60261fd 100644
--- a/contrib/restricted/google/benchmark/src/statistics.cc
+++ b/contrib/restricted/google/benchmark/src/statistics.cc
@@ -32,7 +32,7 @@ auto StatisticsSum = [](const std::vector<double>& v) {
 
 double StatisticsMean(const std::vector<double>& v) {
   if (v.empty()) return 0.0;
-  return StatisticsSum(v) * (1.0 / v.size());
+  return StatisticsSum(v) * (1.0 / static_cast<double>(v.size()));
 }
 
 double StatisticsMedian(const std::vector<double>& v) {
@@ -71,8 +71,11 @@ double StatisticsStdDev(const std::vector<double>& v) {
   // Sample standard deviation is undefined for n = 1
   if (v.size() == 1) return 0.0;
 
-  const double avg_squares = SumSquares(v) * (1.0 / v.size());
-  return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+  const double avg_squares =
+      SumSquares(v) * (1.0 / static_cast<double>(v.size()));
+  return Sqrt(static_cast<double>(v.size()) /
+              (static_cast<double>(v.size()) - 1.0) *
+              (avg_squares - Sqr(mean)));
 }
 
 double StatisticsCV(const std::vector<double>& v) {
@@ -81,6 +84,8 @@ double StatisticsCV(const std::vector<double>& v) {
   const auto stddev = StatisticsStdDev(v);
   const auto mean = StatisticsMean(v);
 
+  if (std::fpclassify(mean) == FP_ZERO) return 0.0;
+
   return stddev / mean;
 }
 
@@ -92,7 +97,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   auto error_count = std::count_if(reports.begin(), reports.end(),
                                    [](Run const& run) { return run.skipped; });
 
-  if (reports.size() - error_count < 2) {
+  if (reports.size() - static_cast<size_t>(error_count) < 2) {
     // We don't report aggregated data if there was a single run.
     return results;
   }
@@ -174,7 +179,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
     // Similarly, if there are N repetitions with 1 iterations each,
     // an aggregate will be computed over N measurements, not 1.
     // Thus it is best to simply use the count of separate reports.
-    data.iterations = reports.size();
+    data.iterations = static_cast<IterationCount>(reports.size());
 
     data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
     data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
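
A standalone sketch of the coefficient-of-variation logic above, including the new FP_ZERO guard that avoids dividing by a zero mean (names and structure are illustrative, not the library's code):

#include <cmath>
#include <vector>

double CoefficientOfVariation(const std::vector<double>& v) {
  if (v.size() < 2) return 0.0;
  double mean = 0.0;
  for (double x : v) mean += x;
  mean /= static_cast<double>(v.size());
  double sum_sq = 0.0;
  for (double x : v) sum_sq += (x - mean) * (x - mean);
  // Sample standard deviation, matching StatisticsStdDev's n/(n-1) scaling.
  const double stddev =
      std::sqrt(sum_sq / (static_cast<double>(v.size()) - 1.0));
  if (std::fpclassify(mean) == FP_ZERO) return 0.0;  // the guard added above
  return stddev / mean;
}
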
diff --git a/contrib/restricted/google/benchmark/src/string_util.cc b/contrib/restricted/google/benchmark/src/string_util.cc
index c69e40a813..9ba63a700a 100644
--- a/contrib/restricted/google/benchmark/src/string_util.cc
+++ b/contrib/restricted/google/benchmark/src/string_util.cc
@@ -56,7 +56,7 @@ void ToExponentAndMantissa(double val, int precision, double one_k,
       scaled /= one_k;
       if (scaled <= big_threshold) {
         mantissa_stream << scaled;
-        *exponent = i + 1;
+        *exponent = static_cast<int64_t>(i + 1);
         *mantissa = mantissa_stream.str();
         return;
       }
diff --git a/contrib/restricted/google/benchmark/src/sysinfo.cc b/contrib/restricted/google/benchmark/src/sysinfo.cc
index ff05e32f39..17746e124f 100644
--- a/contrib/restricted/google/benchmark/src/sysinfo.cc
+++ b/contrib/restricted/google/benchmark/src/sysinfo.cc
@@ -15,6 +15,10 @@
 #include "internal_macros.h"
 
 #ifdef BENCHMARK_OS_WINDOWS
+#if !defined(WINVER) || WINVER < 0x0600
+#undef WINVER
+#define WINVER 0x0600
+#endif  // WINVER handling
 #include <shlwapi.h>
 #undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
 #include <versionhelpers.h>
@@ -158,7 +162,7 @@ ValueUnion GetSysctlImp(std::string const& name) {
     mib[1] = HW_CPUSPEED;
   }
 
-  if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
+  if (sysctl(mib, 2, buff.data(), &buff.size, nullptr, 0) == -1) {
     return ValueUnion();
   }
   return buff;
@@ -346,7 +350,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
     CPUInfo::CacheInfo C;
     C.num_sharing = static_cast<int>(b.count());
     C.level = cache.Level;
-    C.size = cache.Size;
+    C.size = static_cast<int>(cache.Size);
     C.type = "Unknown";
     switch (cache.Type) {
      case CacheUnified:
@@ -456,6 +460,8 @@ std::string GetSystemName() {
 #define HOST_NAME_MAX 256
 #elif defined(BENCHMARK_OS_SOLARIS)
 #define HOST_NAME_MAX MAXHOSTNAMELEN
+#elif defined(BENCHMARK_OS_ZOS)
+#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
 #else
 #pragma message("HOST_NAME_MAX not defined. using 64")
 #define HOST_NAME_MAX 64
@@ -468,27 +474,25 @@ std::string GetSystemName() {
 #endif  // Catch-all POSIX block.
 }
 
-int GetNumCPUs() {
+int GetNumCPUsImpl() {
 #ifdef BENCHMARK_HAS_SYSCTL
   int num_cpu = -1;
   if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu;
-  fprintf(stderr, "Err: %s\n", strerror(errno));
-  std::exit(EXIT_FAILURE);
+  PrintErrorAndDie("Err: ", strerror(errno));
 #elif defined(BENCHMARK_OS_WINDOWS)
   SYSTEM_INFO sysinfo;
   // Use memset as opposed to = {} to avoid GCC missing initializer false
   // positives.
   std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
   GetSystemInfo(&sysinfo);
-  return sysinfo.dwNumberOfProcessors;  // number of logical
-                                        // processors in the current
-                                        // group
+  // number of logical processors in the current group
+  return static_cast<int>(sysinfo.dwNumberOfProcessors);
 #elif defined(BENCHMARK_OS_SOLARIS)
   // Returns -1 in case of a failure.
   long num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
   if (num_cpu < 0) {
-    fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
-            strerror(errno));
+    PrintErrorAndDie("sysconf(_SC_NPROCESSORS_ONLN) failed with error: ",
+                     strerror(errno));
   }
   return (int)num_cpu;
 #elif defined(BENCHMARK_OS_QNX)
@@ -504,10 +508,13 @@ int GetNumCPUs() {
   int max_id = -1;
   std::ifstream f("/proc/cpuinfo");
   if (!f.is_open()) {
-    std::cerr << "failed to open /proc/cpuinfo\n";
-    return -1;
+    PrintErrorAndDie("Failed to open /proc/cpuinfo");
   }
+#if defined(__alpha__)
+  const std::string Key = "cpus detected";
+#else
   const std::string Key = "processor";
+#endif
   std::string ln;
   while (std::getline(f, ln)) {
     if (ln.empty()) continue;
@@ -530,12 +537,10 @@ int GetNumCPUs() {
     }
   }
   if (f.bad()) {
-    std::cerr << "Failure reading /proc/cpuinfo\n";
-    return -1;
+    PrintErrorAndDie("Failure reading /proc/cpuinfo");
   }
   if (!f.eof()) {
-    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
-    return -1;
+    PrintErrorAndDie("Failed to read to end of /proc/cpuinfo");
   }
   f.close();
 
@@ -549,6 +554,16 @@ int GetNumCPUs() {
   BENCHMARK_UNREACHABLE();
 }
 
+int GetNumCPUs() {
+  const int num_cpus = GetNumCPUsImpl();
+  if (num_cpus < 1) {
+    PrintErrorAndDie(
+        "Unable to extract number of CPUs. If your platform uses "
+        "/proc/cpuinfo, custom support may need to be added.");
+  }
+  return num_cpus;
+}
+
 class ThreadAffinityGuard final {
  public:
   ThreadAffinityGuard() : reset_affinity(SetAffinity()) {
@@ -651,7 +666,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
                                   &freq)) {
     // The value is in kHz (as the file name suggests).  For example, on a
     // 2GHz warpstation, the file contains the value "2000000".
-    return freq * 1000.0;
+    return static_cast<double>(freq) * 1000.0;
   }
 
   const double error_value = -1;
@@ -719,9 +734,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
 #endif
   unsigned long long hz = 0;
 #if defined BENCHMARK_OS_OPENBSD
-  if (GetSysctl(freqStr, &hz)) return hz * 1000000;
+  if (GetSysctl(freqStr, &hz)) return static_cast<double>(hz * 1000000);
 #else
-  if (GetSysctl(freqStr, &hz)) return hz;
+  if (GetSysctl(freqStr, &hz)) return static_cast<double>(hz);
 #endif
   fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
           freqStr, strerror(errno));
@@ -771,8 +786,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
   kstat_close(kc);
   return clock_hz;
 #elif defined(BENCHMARK_OS_QNX)
-  return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
-                             (int64_t)(1000 * 1000));
+  return static_cast<double>(
+      static_cast<int64_t>(SYSPAGE_ENTRY(cpuinfo)->speed) *
+      static_cast<int64_t>(1000 * 1000));
 #elif defined(BENCHMARK_OS_QURT)
   // QuRT doesn't provide any API to query Hexagon frequency.
   return 1000000000;
@@ -820,7 +836,7 @@ std::vector<double> GetLoadAvg() {
     !(defined(__ANDROID__) && __ANDROID_API__ < 29)
   static constexpr int kMaxSamples = 3;
   std::vector<double> res(kMaxSamples, 0.0);
-  const int nelem = getloadavg(res.data(), kMaxSamples);
+  const size_t nelem = static_cast<size_t>(getloadavg(res.data(), kMaxSamples));
  if (nelem < 1) {
    res.clear();
  } else {
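
The sysinfo.cc change above splits the raw platform probe into GetNumCPUsImpl() and wraps it in a GetNumCPUs() that enforces the invariant num_cpus >= 1. A simplified standalone analogue of that pattern (std::thread::hardware_concurrency() stands in for the per-OS probes; names are illustrative):

#include <cstdio>
#include <cstdlib>
#include <thread>

static int ProbeNumCPUs() {
  // May return 0 when the count cannot be determined.
  return static_cast<int>(std::thread::hardware_concurrency());
}

int ValidatedNumCPUs() {
  const int num_cpus = ProbeNumCPUs();
  if (num_cpus < 1) {
    std::fprintf(stderr, "Unable to extract number of CPUs.\n");
    std::exit(EXIT_FAILURE);  // fail loudly instead of returning -1
  }
  return num_cpus;
}
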
diff --git a/contrib/restricted/google/benchmark/src/timers.cc b/contrib/restricted/google/benchmark/src/timers.cc
index 042895d0d4..c392649715 100644
--- a/contrib/restricted/google/benchmark/src/timers.cc
+++ b/contrib/restricted/google/benchmark/src/timers.cc
@@ -102,7 +102,8 @@ double MakeTime(thread_basic_info_data_t const& info) {
 #endif
 #if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
 double MakeTime(struct timespec const& ts) {
-  return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
+  return static_cast<double>(ts.tv_sec) +
+         (static_cast<double>(ts.tv_nsec) * 1e-9);
 }
 #endif
 
@@ -181,6 +182,9 @@ double ThreadCPUUsage() {
   // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
   // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
   return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_ZOS)
+  // z/OS doesn't support CLOCK_THREAD_CPUTIME_ID.
+  return ProcessCPUUsage();
 #elif defined(BENCHMARK_OS_SOLARIS)
   struct rusage ru;
   if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
@@ -241,9 +245,9 @@ std::string LocalDateTimeString() {
       tz_offset_sign = '-';
     }
 
-    tz_len =
+    tz_len = static_cast<size_t>(
         ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
-                   tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
+                   tz_offset_sign, offset_minutes / 100, offset_minutes % 100));
     BM_CHECK(tz_len == kTzOffsetLen);
     ((void)tz_len);  // Prevent unused variable warning in optimized build.
   } else {
diff --git a/contrib/restricted/google/benchmark/test/benchmark_gtest.cc b/contrib/restricted/google/benchmark/test/benchmark_gtest.cc
index 2c9e555d92..0aa2552c1e 100644
--- a/contrib/restricted/google/benchmark/test/benchmark_gtest.cc
+++ b/contrib/restricted/google/benchmark/test/benchmark_gtest.cc
@@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) {
 
 TEST(AddRangeTest, FullRange8) {
   std::vector<int8_t> dst;
-  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8});
+  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
   EXPECT_THAT(
       dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
 }
diff --git a/contrib/restricted/google/benchmark/test/statistics_gtest.cc b/contrib/restricted/google/benchmark/test/statistics_gtest.cc
index 1de2d87d4b..48c77260fd 100644
--- a/contrib/restricted/google/benchmark/test/statistics_gtest.cc
+++ b/contrib/restricted/google/benchmark/test/statistics_gtest.cc
@@ -28,8 +28,8 @@ TEST(StatisticsTest, StdDev) {
 TEST(StatisticsTest, CV) {
   EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0);
   EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. / 2.);
-  EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}),
-                   0.32888184094918121);
+  ASSERT_NEAR(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}),
+              0.32888184094918121, 1e-15);
 }
 
 }  // end namespace
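
Why the CV test above moved from EXPECT_DOUBLE_EQ to ASSERT_NEAR: EXPECT_DOUBLE_EQ requires the values to match within 4 ULPs, while the CV value is produced by a longer chain of floating-point operations whose rounding may differ across compilers and platforms, so an absolute tolerance is the more robust contract. A standalone sketch of the pattern (test name and values are hypothetical):

#include "gtest/gtest.h"

TEST(FloatCompareSketch, AbsoluteTolerance) {
  double sum = 0.0;
  for (int i = 0; i < 10; ++i) sum += 0.1;  // accumulates rounding error
  ASSERT_NEAR(sum, 1.0, 1e-9);  // the tolerance absorbs the accumulated error
}
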
diff --git a/contrib/restricted/google/benchmark/tools/compare/compare.py b/contrib/restricted/google/benchmark/tools/compare/compare.py
index e5eeb247e6..7572520cc0 100755
--- a/contrib/restricted/google/benchmark/tools/compare/compare.py
+++ b/contrib/restricted/google/benchmark/tools/compare/compare.py
@@ -1,17 +1,20 @@
 #!/usr/bin/env python3
 
-import unittest
+# type: ignore
+
 """
 compare.py - versatile benchmark output compare tool
 """
 
 import argparse
-from argparse import ArgumentParser
 import json
-import sys
 import os
+import sys
+import unittest
+from argparse import ArgumentParser
+
 import gbench
-from gbench import util, report
+from gbench import report, util
 
 
 def check_inputs(in1, in2, flags):
@@ -20,163 +23,203 @@ def check_inputs(in1, in2, flags):
     """
     in1_kind, in1_err = util.classify_input_file(in1)
     in2_kind, in2_err = util.classify_input_file(in2)
-    output_file = util.find_benchmark_flag('--benchmark_out=', flags)
-    output_type = util.find_benchmark_flag('--benchmark_out_format=', flags)
-    if in1_kind == util.IT_Executable and in2_kind == util.IT_Executable and output_file:
-        print(("WARNING: '--benchmark_out=%s' will be passed to both "
-               "benchmarks causing it to be overwritten") % output_file)
+    output_file = util.find_benchmark_flag("--benchmark_out=", flags)
+    output_type = util.find_benchmark_flag("--benchmark_out_format=", flags)
+    if (
+        in1_kind == util.IT_Executable
+        and in2_kind == util.IT_Executable
+        and output_file
+    ):
+        print(
+            (
+                "WARNING: '--benchmark_out=%s' will be passed to both "
+                "benchmarks causing it to be overwritten"
+            )
+            % output_file
+        )
     if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON:
         # When both sides are JSON the only supported flag is
         # --benchmark_filter=
-        for flag in util.remove_benchmark_flags('--benchmark_filter=', flags):
-            print("WARNING: passing %s has no effect since both "
-                  "inputs are JSON" % flag)
-    if output_type is not None and output_type != 'json':
-        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
-               " is not supported.") % output_type)
+        for flag in util.remove_benchmark_flags("--benchmark_filter=", flags):
+            print(
+                "WARNING: passing %s has no effect since both "
+                "inputs are JSON" % flag
+            )
+    if output_type is not None and output_type != "json":
+        print(
+            (
+                "ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
+                " is not supported."
+            )
+            % output_type
+        )
         sys.exit(1)
 
 
 def create_parser():
     parser = ArgumentParser(
-        description='versatile benchmark output compare tool')
+        description="versatile benchmark output compare tool"
+    )
 
     parser.add_argument(
-        '-a',
-        '--display_aggregates_only',
-        dest='display_aggregates_only',
+        "-a",
+        "--display_aggregates_only",
+        dest="display_aggregates_only",
         action="store_true",
         help="If there are repetitions, by default, we display everything - the"
" + "Internally, all the actual runs are still used, e.g. for U test.", + ) parser.add_argument( - '--no-color', - dest='color', + "--no-color", + dest="color", default=True, action="store_false", - help="Do not use colors in the terminal output" + help="Do not use colors in the terminal output", ) parser.add_argument( - '-d', - '--dump_to_json', - dest='dump_to_json', - help="Additionally, dump benchmark comparison output to this file in JSON format.") + "-d", + "--dump_to_json", + dest="dump_to_json", + help="Additionally, dump benchmark comparison output to this file in JSON format.", + ) utest = parser.add_argument_group() utest.add_argument( - '--no-utest', - dest='utest', + "--no-utest", + dest="utest", default=True, action="store_false", - help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) + help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format( + report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS + ), + ) alpha_default = 0.05 utest.add_argument( "--alpha", - dest='utest_alpha', + dest="utest_alpha", default=alpha_default, type=float, - help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % - alpha_default) + help=( + "significance level alpha. 
+        help=(
+            "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
+        )
+        % alpha_default,
+    )
 
     subparsers = parser.add_subparsers(
-        help='This tool has multiple modes of operation:',
-        dest='mode')
+        help="This tool has multiple modes of operation:", dest="mode"
+    )
 
     parser_a = subparsers.add_parser(
-        'benchmarks',
-        help='The most simple use-case, compare all the output of these two benchmarks')
-    baseline = parser_a.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "benchmarks",
+        help="The most simple use-case, compare all the output of these two benchmarks",
+    )
+    baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
+        "test_baseline",
+        metavar="test_baseline",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     contender = parser_a.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
+        "test_contender",
+        metavar="test_contender",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     parser_a.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     parser_b = subparsers.add_parser(
-        'filters', help='Compare filter one with the filter two of benchmark')
-    baseline = parser_b.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "filters", help="Compare filter one with the filter two of benchmark"
+    )
+    baseline = parser_b.add_argument_group("baseline", "The benchmark baseline")
    baseline.add_argument(
-        'test',
-        metavar='test',
-        type=argparse.FileType('r'),
+        "test",
+        metavar="test",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
+        "filter_baseline",
+        metavar="filter_baseline",
         type=str,
         nargs=1,
-        help='The first filter, that will be used as baseline')
+        help="The first filter, that will be used as baseline",
+    )
     contender = parser_b.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
+        "filter_contender",
+        metavar="filter_contender",
         type=str,
         nargs=1,
-        help='The second filter, that will be compared against the baseline')
+        help="The second filter, that will be compared against the baseline",
+    )
     parser_b.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     parser_c = subparsers.add_parser(
-        'benchmarksfiltered',
-        help='Compare filter one of first benchmark with filter two of the second benchmark')
-    baseline = parser_c.add_argument_group(
-        'baseline', 'The benchmark baseline')
+        "benchmarksfiltered",
+        help="Compare filter one of first benchmark with filter two of the second benchmark",
+    )
+    baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
     baseline.add_argument(
-        'test_baseline',
-        metavar='test_baseline',
-        type=argparse.FileType('r'),
+        "test_baseline",
+        metavar="test_baseline",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='A benchmark executable or JSON output file')
+        help="A benchmark executable or JSON output file",
+    )
     baseline.add_argument(
-        'filter_baseline',
-        metavar='filter_baseline',
+        "filter_baseline",
+        metavar="filter_baseline",
         type=str,
         nargs=1,
-        help='The first filter, that will be used as baseline')
+        help="The first filter, that will be used as baseline",
+    )
     contender = parser_c.add_argument_group(
-        'contender', 'The benchmark that will be compared against the baseline')
+        "contender", "The benchmark that will be compared against the baseline"
+    )
     contender.add_argument(
-        'test_contender',
-        metavar='test_contender',
-        type=argparse.FileType('r'),
+        "test_contender",
+        metavar="test_contender",
+        type=argparse.FileType("r"),
         nargs=1,
-        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
+        help="The second benchmark executable or JSON output file, that will be compared against the baseline",
+    )
     contender.add_argument(
-        'filter_contender',
-        metavar='filter_contender',
+        "filter_contender",
+        metavar="filter_contender",
         type=str,
         nargs=1,
-        help='The second filter, that will be compared against the baseline')
+        help="The second filter, that will be compared against the baseline",
+    )
     parser_c.add_argument(
-        'benchmark_options',
-        metavar='benchmark_options',
+        "benchmark_options",
+        metavar="benchmark_options",
         nargs=argparse.REMAINDER,
-        help='Arguments to pass when running benchmark executables')
+        help="Arguments to pass when running benchmark executables",
+    )
 
     return parser
 
@@ -191,16 +234,16 @@ def main():
     assert not unknown_args
     benchmark_options = args.benchmark_options
 
-    if args.mode == 'benchmarks':
+    if args.mode == "benchmarks":
         test_baseline = args.test_baseline[0].name
         test_contender = args.test_contender[0].name
-        filter_baseline = ''
-        filter_contender = ''
+        filter_baseline = ""
+        filter_contender = ""
 
         # NOTE: if test_baseline == test_contender, you are analyzing the stdev
 
-        description = 'Comparing %s to %s' % (test_baseline, test_contender)
-    elif args.mode == 'filters':
+        description = "Comparing %s to %s" % (test_baseline, test_contender)
+    elif args.mode == "filters":
         test_baseline = args.test[0].name
         test_contender = args.test[0].name
         filter_baseline = args.filter_baseline[0]
@@ -209,9 +252,12 @@ def main():
         # NOTE: if filter_baseline == filter_contender, you are analyzing the
         # stdev
 
-        description = 'Comparing %s to %s (from %s)' % (
-            filter_baseline, filter_contender, args.test[0].name)
-    elif args.mode == 'benchmarksfiltered':
+        description = "Comparing %s to %s (from %s)" % (
+            filter_baseline,
+            filter_contender,
+            args.test[0].name,
+        )
+    elif args.mode == "benchmarksfiltered":
         test_baseline = args.test_baseline[0].name
         test_contender = args.test_contender[0].name
         filter_baseline = args.filter_baseline[0]
@@ -220,8 +266,12 @@ def main():
 
         # NOTE: if test_baseline == test_contender and
         # filter_baseline == filter_contender, you are analyzing the stdev
 
-        description = 'Comparing %s (from %s) to %s (from %s)' % (
-            filter_baseline, test_baseline, filter_contender, test_contender)
+        description = "Comparing %s (from %s) to %s (from %s)" % (
+            filter_baseline,
+            test_baseline,
+            filter_contender,
+            test_contender,
+        )
     else:
         # should never happen
         print("Unrecognized mode of operation: '%s'" % args.mode)
@@ -231,199 +281,240 @@ def main():
     check_inputs(test_baseline, test_contender, benchmark_options)
 
     if args.display_aggregates_only:
-        benchmark_options += ['--benchmark_display_aggregates_only=true']
+        benchmark_options += ["--benchmark_display_aggregates_only=true"]
 
     options_baseline = []
     options_contender = []
 
     if filter_baseline and filter_contender:
-        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
-        options_contender = ['--benchmark_filter=%s' % filter_contender]
+        options_baseline = ["--benchmark_filter=%s" % filter_baseline]
+        options_contender = ["--benchmark_filter=%s" % filter_contender]
 
     # Run the benchmarks and report the results
-    json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
-        test_baseline, benchmark_options + options_baseline))
-    json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
-        test_contender, benchmark_options + options_contender))
+    json1 = json1_orig = gbench.util.sort_benchmark_results(
+        gbench.util.run_or_load_benchmark(
+            test_baseline, benchmark_options + options_baseline
+        )
+    )
+    json2 = json2_orig = gbench.util.sort_benchmark_results(
+        gbench.util.run_or_load_benchmark(
+            test_contender, benchmark_options + options_contender
+        )
+    )
 
     # Now, filter the benchmarks so that the difference report can work
     if filter_baseline and filter_contender:
-        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
%s]" % (filter_baseline, filter_contender) json1 = gbench.report.filter_benchmark( - json1_orig, filter_baseline, replacement) + json1_orig, filter_baseline, replacement + ) json2 = gbench.report.filter_benchmark( - json2_orig, filter_contender, replacement) + json2_orig, filter_contender, replacement + ) - diff_report = gbench.report.get_difference_report( - json1, json2, args.utest) + diff_report = gbench.report.get_difference_report(json1, json2, args.utest) output_lines = gbench.report.print_difference_report( diff_report, args.display_aggregates_only, - args.utest, args.utest_alpha, args.color) + args.utest, + args.utest_alpha, + args.color, + ) print(description) for ln in output_lines: print(ln) # Optionally, diff and output to JSON if args.dump_to_json is not None: - with open(args.dump_to_json, 'w') as f_json: - json.dump(diff_report, f_json) + with open(args.dump_to_json, "w") as f_json: + json.dump(diff_report, f_json, indent=1) + class TestParser(unittest.TestCase): def setUp(self): self.parser = create_parser() testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'gbench', - 'Inputs') - self.testInput0 = os.path.join(testInputs, 'test1_run1.json') - self.testInput1 = os.path.join(testInputs, 'test1_run2.json') + os.path.dirname(os.path.realpath(__file__)), "gbench", "Inputs" + ) + self.testInput0 = os.path.join(testInputs, "test1_run1.json") + self.testInput1 = os.path.join(testInputs, "test1_run2.json") def test_benchmarks_basic(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1]) + ["benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest(self): parsed = self.parser.parse_args( - ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) + ["--no-utest", "benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.05) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_display_aggregates_only(self): parsed = self.parser.parse_args( - ['-a', 'benchmarks', self.testInput0, self.testInput1]) + ["-a", "benchmarks", self.testInput0, self.testInput1] + ) self.assertTrue(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_with_utest_alpha(self): parsed = self.parser.parse_args( - ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + ["--alpha=0.314", "benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') + 
self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest_with_utest_alpha(self): parsed = self.parser.parse_args( - ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + [ + "--no-utest", + "--alpha=0.314", + "benchmarks", + self.testInput0, + self.testInput1, + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_with_remainder(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, 'd']) + ["benchmarks", self.testInput0, self.testInput1, "d"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['d']) + self.assertEqual(parsed.benchmark_options, ["d"]) def test_benchmarks_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + ["benchmarks", self.testInput0, self.testInput1, "--", "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['e']) + self.assertEqual(parsed.benchmark_options, ["e"]) def test_filters_basic(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd']) + parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d"]) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") self.assertFalse(parsed.benchmark_options) def test_filters_with_remainder(self): parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', 'e']) + ["filters", self.testInput0, "c", "d", "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['e']) + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") + self.assertEqual(parsed.benchmark_options, ["e"]) def test_filters_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - 
['filters', self.testInput0, 'c', 'd', '--', 'f']) + ["filters", self.testInput0, "c", "d", "--", "f"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['f']) + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") + self.assertEqual(parsed.benchmark_options, ["f"]) def test_benchmarksfiltered_basic(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.filter_contender[0], "e") self.assertFalse(parsed.benchmark_options) def test_benchmarksfiltered_with_remainder(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + [ + "benchmarksfiltered", + self.testInput0, + "c", + self.testInput1, + "e", + "f", + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'f') + self.assertEqual(parsed.filter_contender[0], "e") + self.assertEqual(parsed.benchmark_options[0], "f") def test_benchmarksfiltered_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + [ + "benchmarksfiltered", + self.testInput0, + "c", + self.testInput1, + "e", + "--", + "g", + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'g') + self.assertEqual(parsed.filter_contender[0], "e") + self.assertEqual(parsed.benchmark_options[0], "g") -if __name__ == '__main__': +if __name__ == "__main__": # unittest.main() main() diff --git a/contrib/restricted/google/benchmark/tools/compare/gbench/__init__.py b/contrib/restricted/google/benchmark/tools/compare/gbench/__init__.py index fce1a1acfb..9212568814 100644 --- 
a/contrib/restricted/google/benchmark/tools/compare/gbench/__init__.py
+++ b/contrib/restricted/google/benchmark/tools/compare/gbench/__init__.py
@@ -1,8 +1,8 @@
 """Google Benchmark tooling"""
 
-__author__ = 'Eric Fiselier'
-__email__ = 'eric@efcs.ca'
+__author__ = "Eric Fiselier"
+__email__ = "eric@efcs.ca"
 __versioninfo__ = (0, 5, 0)
-__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
+__version__ = ".".join(str(v) for v in __versioninfo__) + "dev"
 
-__all__ = []
+__all__ = []  # type: ignore
diff --git a/contrib/restricted/google/benchmark/tools/compare/gbench/report.py b/contrib/restricted/google/benchmark/tools/compare/gbench/report.py
index b2bbfb9f62..7158fd1654 100644
--- a/contrib/restricted/google/benchmark/tools/compare/gbench/report.py
+++ b/contrib/restricted/google/benchmark/tools/compare/gbench/report.py
@@ -1,14 +1,17 @@
-"""report.py - Utilities for reporting statistics about benchmark results
+# type: ignore
+
+"""
+report.py - Utilities for reporting statistics about benchmark results
 """
 
-import unittest
-import os
-import re
 import copy
+import os
 import random
+import re
+import unittest
 
-from scipy.stats import mannwhitneyu, gmean
 from numpy import array
+from scipy.stats import gmean, mannwhitneyu
 
 
 class BenchmarkColor(object):
@@ -17,26 +20,25 @@ class BenchmarkColor(object):
         self.code = code
 
     def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.name, self.code))
+        return "%s%r" % (self.__class__.__name__, (self.name, self.code))
 
     def __format__(self, format):
         return self.code
 
 
 # Benchmark Colors Enumeration
-BC_NONE = BenchmarkColor('NONE', '')
-BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
-BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
-BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
-BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
-BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
-BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
-BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
-BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
-BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
-BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
-BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
+BC_NONE = BenchmarkColor("NONE", "")
+BC_MAGENTA = BenchmarkColor("MAGENTA", "\033[95m")
+BC_CYAN = BenchmarkColor("CYAN", "\033[96m")
+BC_OKBLUE = BenchmarkColor("OKBLUE", "\033[94m")
+BC_OKGREEN = BenchmarkColor("OKGREEN", "\033[32m")
+BC_HEADER = BenchmarkColor("HEADER", "\033[92m")
+BC_WARNING = BenchmarkColor("WARNING", "\033[93m")
+BC_WHITE = BenchmarkColor("WHITE", "\033[97m")
+BC_FAIL = BenchmarkColor("FAIL", "\033[91m")
+BC_ENDC = BenchmarkColor("ENDC", "\033[0m")
+BC_BOLD = BenchmarkColor("BOLD", "\033[1m")
+BC_UNDERLINE = BenchmarkColor("UNDERLINE", "\033[4m")
 
 UTEST_MIN_REPETITIONS = 2
 UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number, More is better.
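For readers of the hunk above: because BenchmarkColor.__format__ returns the raw escape code, the BC_* constants drop straight into str.format. A minimal sketch, assuming the reformatted module is importable as gbench.report (the printed text is made up):

    from gbench.report import BC_OKGREEN, BC_ENDC

    # Formatting a BenchmarkColor emits its ANSI sequence (self.code);
    # BC_ENDC ("\033[0m") resets the terminal state afterwards.
    print("{}all good{}".format(BC_OKGREEN, BC_ENDC))

This is also why color_format in the next hunk can neutralize color output simply by swapping every BenchmarkColor argument for BC_NONE, whose code is the empty string.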
@@ -59,10 +61,14 @@ def color_format(use_color, fmt_str, *args, **kwargs): """ assert use_color is True or use_color is False if not use_color: - args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for arg in args] - kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for key, arg in kwargs.items()} + args = [ + arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for arg in args + ] + kwargs = { + key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for key, arg in kwargs.items() + } return fmt_str.format(*args, **kwargs) @@ -73,8 +79,8 @@ def find_longest_name(benchmark_list): """ longest_name = 1 for bc in benchmark_list: - if len(bc['name']) > longest_name: - longest_name = len(bc['name']) + if len(bc["name"]) > longest_name: + longest_name = len(bc["name"]) return longest_name @@ -95,13 +101,13 @@ def filter_benchmark(json_orig, family, replacement=""): """ regex = re.compile(family) filtered = {} - filtered['benchmarks'] = [] - for be in json_orig['benchmarks']: - if not regex.search(be['name']): + filtered["benchmarks"] = [] + for be in json_orig["benchmarks"]: + if not regex.search(be["name"]): continue filteredbench = copy.deepcopy(be) # Do NOT modify the old name! - filteredbench['name'] = regex.sub(replacement, filteredbench['name']) - filtered['benchmarks'].append(filteredbench) + filteredbench["name"] = regex.sub(replacement, filteredbench["name"]) + filtered["benchmarks"].append(filteredbench) return filtered @@ -110,9 +116,11 @@ def get_unique_benchmark_names(json): While *keeping* the order, give all the unique 'names' used for benchmarks. """ seen = set() - uniqued = [x['name'] for x in json['benchmarks'] - if x['name'] not in seen and - (seen.add(x['name']) or True)] + uniqued = [ + x["name"] + for x in json["benchmarks"] + if x["name"] not in seen and (seen.add(x["name"]) or True) + ] return uniqued @@ -125,7 +133,7 @@ def intersect(list1, list2): def is_potentially_comparable_benchmark(x): - return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x) + return "time_unit" in x and "real_time" in x and "cpu_time" in x def partition_benchmarks(json1, json2): @@ -142,18 +150,24 @@ def partition_benchmarks(json1, json2): time_unit = None # Pick the time unit from the first entry of the lhs benchmark. # We should be careful not to crash with unexpected input. - for x in json1['benchmarks']: - if (x['name'] == name and is_potentially_comparable_benchmark(x)): - time_unit = x['time_unit'] + for x in json1["benchmarks"]: + if x["name"] == name and is_potentially_comparable_benchmark(x): + time_unit = x["time_unit"] break if time_unit is None: continue # Filter by name and time unit. # All the repetitions are assumed to be comparable. - lhs = [x for x in json1['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] - rhs = [x for x in json2['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] + lhs = [ + x + for x in json1["benchmarks"] + if x["name"] == name and x["time_unit"] == time_unit + ] + rhs = [ + x + for x in json2["benchmarks"] + if x["name"] == name and x["time_unit"] == time_unit + ] partitions.append([lhs, rhs]) return partitions @@ -164,7 +178,7 @@ def get_timedelta_field_as_seconds(benchmark, field_name): time_unit, as time in seconds. 
""" timedelta = benchmark[field_name] - time_unit = benchmark.get('time_unit', 's') + time_unit = benchmark.get("time_unit", "s") return timedelta * _TIME_UNIT_TO_SECONDS_MULTIPLIER.get(time_unit) @@ -174,11 +188,15 @@ def calculate_geomean(json): and calculate their geomean. """ times = [] - for benchmark in json['benchmarks']: - if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate': + for benchmark in json["benchmarks"]: + if "run_type" in benchmark and benchmark["run_type"] == "aggregate": continue - times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'), - get_timedelta_field_as_seconds(benchmark, 'cpu_time')]) + times.append( + [ + get_timedelta_field_as_seconds(benchmark, "real_time"), + get_timedelta_field_as_seconds(benchmark, "cpu_time"), + ] + ) return gmean(times) if times else array([]) @@ -190,19 +208,23 @@ def extract_field(partition, field_name): def calc_utest(timings_cpu, timings_time): - min_rep_cnt = min(len(timings_time[0]), - len(timings_time[1]), - len(timings_cpu[0]), - len(timings_cpu[1])) + min_rep_cnt = min( + len(timings_time[0]), + len(timings_time[1]), + len(timings_cpu[0]), + len(timings_cpu[1]), + ) # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions? if min_rep_cnt < UTEST_MIN_REPETITIONS: return False, None, None time_pvalue = mannwhitneyu( - timings_time[0], timings_time[1], alternative='two-sided').pvalue + timings_time[0], timings_time[1], alternative="two-sided" + ).pvalue cpu_pvalue = mannwhitneyu( - timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue + timings_cpu[0], timings_cpu[1], alternative="two-sided" + ).pvalue return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue @@ -212,38 +234,46 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): return BC_FAIL if pval >= utest_alpha else BC_OKGREEN # Check if we failed miserably with minimum required repetitions for utest - if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None: + if ( + not utest["have_optimal_repetitions"] + and utest["cpu_pvalue"] is None + and utest["time_pvalue"] is None + ): return [] dsc = "U Test, Repetitions: {} vs {}".format( - utest['nr_of_repetitions'], utest['nr_of_repetitions_other']) + utest["nr_of_repetitions"], utest["nr_of_repetitions_other"] + ) dsc_color = BC_OKGREEN # We still got some results to show but issue a warning about it. - if not utest['have_optimal_repetitions']: + if not utest["have_optimal_repetitions"]: dsc_color = BC_WARNING dsc += ". WARNING: Results unreliable! 
{}+ repetitions recommended.".format( - UTEST_OPTIMAL_REPETITIONS) + UTEST_OPTIMAL_REPETITIONS + ) special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" - return [color_format(use_color, - special_str, - BC_HEADER, - "{}{}".format(bc_name, UTEST_COL_NAME), - first_col_width, - get_utest_color( - utest['time_pvalue']), utest['time_pvalue'], - get_utest_color( - utest['cpu_pvalue']), utest['cpu_pvalue'], - dsc_color, dsc, - endc=BC_ENDC)] - - -def get_difference_report( - json1, - json2, - utest=False): + return [ + color_format( + use_color, + special_str, + BC_HEADER, + "{}{}".format(bc_name, UTEST_COL_NAME), + first_col_width, + get_utest_color(utest["time_pvalue"]), + utest["time_pvalue"], + get_utest_color(utest["cpu_pvalue"]), + utest["cpu_pvalue"], + dsc_color, + dsc, + endc=BC_ENDC, + ) + ] + + +def get_difference_report(json1, json2, utest=False): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. Output is another json containing @@ -254,37 +284,44 @@ def get_difference_report( diff_report = [] partitions = partition_benchmarks(json1, json2) for partition in partitions: - benchmark_name = partition[0][0]['name'] - label = partition[0][0]['label'] if 'label' in partition[0][0] else '' - time_unit = partition[0][0]['time_unit'] + benchmark_name = partition[0][0]["name"] + label = partition[0][0]["label"] if "label" in partition[0][0] else "" + time_unit = partition[0][0]["time_unit"] measurements = [] utest_results = {} # Careful, we may have different repetition count. for i in range(min(len(partition[0]), len(partition[1]))): bn = partition[0][i] other_bench = partition[1][i] - measurements.append({ - 'real_time': bn['real_time'], - 'cpu_time': bn['cpu_time'], - 'real_time_other': other_bench['real_time'], - 'cpu_time_other': other_bench['cpu_time'], - 'time': calculate_change(bn['real_time'], other_bench['real_time']), - 'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time']) - }) + measurements.append( + { + "real_time": bn["real_time"], + "cpu_time": bn["cpu_time"], + "real_time_other": other_bench["real_time"], + "cpu_time_other": other_bench["cpu_time"], + "time": calculate_change( + bn["real_time"], other_bench["real_time"] + ), + "cpu": calculate_change( + bn["cpu_time"], other_bench["cpu_time"] + ), + } + ) # After processing the whole partition, if requested, do the U test. if utest: - timings_cpu = extract_field(partition, 'cpu_time') - timings_time = extract_field(partition, 'real_time') + timings_cpu = extract_field(partition, "cpu_time") + timings_time = extract_field(partition, "real_time") have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest( - timings_cpu, timings_time) - if cpu_pvalue and time_pvalue: + timings_cpu, timings_time + ) + if cpu_pvalue is not None and time_pvalue is not None: utest_results = { - 'have_optimal_repetitions': have_optimal_repetitions, - 'cpu_pvalue': cpu_pvalue, - 'time_pvalue': time_pvalue, - 'nr_of_repetitions': len(timings_cpu[0]), - 'nr_of_repetitions_other': len(timings_cpu[1]) + "have_optimal_repetitions": have_optimal_repetitions, + "cpu_pvalue": cpu_pvalue, + "time_pvalue": time_pvalue, + "nr_of_repetitions": len(timings_cpu[0]), + "nr_of_repetitions_other": len(timings_cpu[1]), } # Store only if we had any measurements for given benchmark. @@ -292,47 +329,63 @@ def get_difference_report( # time units which are not compatible with other time units in the # benchmark suite. 
if measurements: - run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else '' - aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else '' - diff_report.append({ - 'name': benchmark_name, - 'label': label, - 'measurements': measurements, - 'time_unit': time_unit, - 'run_type': run_type, - 'aggregate_name': aggregate_name, - 'utest': utest_results - }) + run_type = ( + partition[0][0]["run_type"] + if "run_type" in partition[0][0] + else "" + ) + aggregate_name = ( + partition[0][0]["aggregate_name"] + if run_type == "aggregate" + and "aggregate_name" in partition[0][0] + else "" + ) + diff_report.append( + { + "name": benchmark_name, + "label": label, + "measurements": measurements, + "time_unit": time_unit, + "run_type": run_type, + "aggregate_name": aggregate_name, + "utest": utest_results, + } + ) lhs_gmean = calculate_geomean(json1) rhs_gmean = calculate_geomean(json2) if lhs_gmean.any() and rhs_gmean.any(): - diff_report.append({ - 'name': 'OVERALL_GEOMEAN', - 'label': '', - 'measurements': [{ - 'real_time': lhs_gmean[0], - 'cpu_time': lhs_gmean[1], - 'real_time_other': rhs_gmean[0], - 'cpu_time_other': rhs_gmean[1], - 'time': calculate_change(lhs_gmean[0], rhs_gmean[0]), - 'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1]) - }], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - }) + diff_report.append( + { + "name": "OVERALL_GEOMEAN", + "label": "", + "measurements": [ + { + "real_time": lhs_gmean[0], + "cpu_time": lhs_gmean[1], + "real_time_other": rhs_gmean[0], + "cpu_time_other": rhs_gmean[1], + "time": calculate_change(lhs_gmean[0], rhs_gmean[0]), + "cpu": calculate_change(lhs_gmean[1], rhs_gmean[1]), + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + } + ) return diff_report def print_difference_report( - json_diff_report, - include_aggregates_only=False, - utest=False, - utest_alpha=0.05, - use_color=True): + json_diff_report, + include_aggregates_only=False, + utest=False, + utest_alpha=0.05, + use_color=True, +): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. @@ -348,44 +401,53 @@ def print_difference_report( return BC_CYAN first_col_width = find_longest_name(json_diff_report) - first_col_width = max( - first_col_width, - len('Benchmark')) + first_col_width = max(first_col_width, len("Benchmark")) first_col_width += len(UTEST_COL_NAME) first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - 'Benchmark', 12 + first_col_width) - output_strs = [first_line, '-' * len(first_line)] + "Benchmark", 12 + first_col_width + ) + output_strs = [first_line, "-" * len(first_line)] fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" for benchmark in json_diff_report: # *If* we were asked to only include aggregates, # and if it is non-aggregate, then don't print it. 
- if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate': - for measurement in benchmark['measurements']: - output_strs += [color_format(use_color, - fmt_str, - BC_HEADER, - benchmark['name'], - first_col_width, - get_color(measurement['time']), - measurement['time'], - get_color(measurement['cpu']), - measurement['cpu'], - measurement['real_time'], - measurement['real_time_other'], - measurement['cpu_time'], - measurement['cpu_time_other'], - endc=BC_ENDC)] + if ( + not include_aggregates_only + or "run_type" not in benchmark + or benchmark["run_type"] == "aggregate" + ): + for measurement in benchmark["measurements"]: + output_strs += [ + color_format( + use_color, + fmt_str, + BC_HEADER, + benchmark["name"], + first_col_width, + get_color(measurement["time"]), + measurement["time"], + get_color(measurement["cpu"]), + measurement["cpu"], + measurement["real_time"], + measurement["real_time_other"], + measurement["cpu_time"], + measurement["cpu_time_other"], + endc=BC_ENDC, + ) + ] # After processing the measurements, if requested and # if applicable (e.g. u-test exists for given benchmark), # print the U test. - if utest and benchmark['utest']: - output_strs += print_utest(benchmark['name'], - benchmark['utest'], - utest_alpha=utest_alpha, - first_col_width=first_col_width, - use_color=use_color) + if utest and benchmark["utest"]: + output_strs += print_utest( + benchmark["name"], + benchmark["utest"], + utest_alpha=utest_alpha, + first_col_width=first_col_width, + use_color=use_color, + ) return output_strs @@ -397,21 +459,21 @@ def print_difference_report( class TestGetUniqueBenchmarkNames(unittest.TestCase): def load_results(self): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test3_run0.json') - with open(testOutput, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput = os.path.join(testInputs, "test3_run0.json") + with open(testOutput, "r") as f: json = json.load(f) return json def test_basic(self): expect_lines = [ - 'BM_One', - 'BM_Two', - 'short', # These two are not sorted - 'medium', # These two are not sorted + "BM_One", + "BM_Two", + "short", # These two are not sorted + "medium", # These two are not sorted ] json = self.load_results() output_lines = get_unique_benchmark_names(json) @@ -427,15 +489,15 @@ class TestReportDifference(unittest.TestCase): def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test1_run1.json') - testOutput2 = os.path.join(testInputs, 'test1_run2.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test1_run1.json") + testOutput2 = os.path.join(testInputs, "test1_run2.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 @@ -444,171 +506,323 @@ class TestReportDifference(unittest.TestCase): def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], - ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], - ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], - ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], - 
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], - ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], - ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', - '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', - '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', - '-0.1000', '100', '110', '100', '90'], - ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], - ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], - ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'], - ['OVERALL_GEOMEAN', '-0.8113', '-0.7779', '0', '0', '0', '0'] + ["BM_SameTimes", "+0.0000", "+0.0000", "10", "10", "10", "10"], + ["BM_2xFaster", "-0.5000", "-0.5000", "50", "25", "50", "25"], + ["BM_2xSlower", "+1.0000", "+1.0000", "50", "100", "50", "100"], + [ + "BM_1PercentFaster", + "-0.0100", + "-0.0100", + "100", + "99", + "100", + "99", + ], + [ + "BM_1PercentSlower", + "+0.0100", + "+0.0100", + "100", + "101", + "100", + "101", + ], + [ + "BM_10PercentFaster", + "-0.1000", + "-0.1000", + "100", + "90", + "100", + "90", + ], + [ + "BM_10PercentSlower", + "+0.1000", + "+0.1000", + "100", + "110", + "100", + "110", + ], + [ + "BM_100xSlower", + "+99.0000", + "+99.0000", + "100", + "10000", + "100", + "10000", + ], + [ + "BM_100xFaster", + "-0.9900", + "-0.9900", + "10000", + "100", + "10000", + "100", + ], + [ + "BM_10PercentCPUToTime", + "+0.1000", + "-0.1000", + "100", + "110", + "100", + "90", + ], + ["BM_ThirdFaster", "-0.3333", "-0.3334", "100", "67", "100", "67"], + ["BM_NotBadTimeUnit", "-0.9000", "+0.2000", "0", "0", "0", "1"], + ["BM_hasLabel", "+0.0000", "+0.0000", "1", "1", "1", "1"], + ["OVERALL_GEOMEAN", "-0.8113", "-0.7779", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, use_color=False) + self.json_diff_report, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) def test_json_diff_report_output(self): expected_output = [ { - 'name': 'BM_SameTimes', - 'label': '', - 'measurements': [{'time': 0.0000, 'cpu': 0.0000, - 'real_time': 10, 'real_time_other': 10, - 'cpu_time': 10, 'cpu_time_other': 10}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_SameTimes", + "label": "", + "measurements": [ + { + "time": 0.0000, + "cpu": 0.0000, + "real_time": 10, + "real_time_other": 10, + "cpu_time": 10, + "cpu_time_other": 10, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_2xFaster', - 'label': '', - 'measurements': [{'time': -0.5000, 'cpu': -0.5000, - 'real_time': 50, 'real_time_other': 25, - 'cpu_time': 50, 'cpu_time_other': 25}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_2xFaster", + "label": "", + "measurements": [ + { + "time": -0.5000, + "cpu": -0.5000, + "real_time": 50, + "real_time_other": 25, + "cpu_time": 50, + "cpu_time_other": 25, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_2xSlower', - 'label': '', - 'measurements': [{'time': 1.0000, 'cpu': 1.0000, - 'real_time': 50, 'real_time_other': 100, - 'cpu_time': 50, 'cpu_time_other': 100}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_2xSlower", 
+ "label": "", + "measurements": [ + { + "time": 1.0000, + "cpu": 1.0000, + "real_time": 50, + "real_time_other": 100, + "cpu_time": 50, + "cpu_time_other": 100, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_1PercentFaster', - 'label': '', - 'measurements': [{'time': -0.0100, 'cpu': -0.0100, - 'real_time': 100, 'real_time_other': 98.9999999, - 'cpu_time': 100, 'cpu_time_other': 98.9999999}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_1PercentFaster", + "label": "", + "measurements": [ + { + "time": -0.0100, + "cpu": -0.0100, + "real_time": 100, + "real_time_other": 98.9999999, + "cpu_time": 100, + "cpu_time_other": 98.9999999, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_1PercentSlower', - 'label': '', - 'measurements': [{'time': 0.0100, 'cpu': 0.0100, - 'real_time': 100, 'real_time_other': 101, - 'cpu_time': 100, 'cpu_time_other': 101}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_1PercentSlower", + "label": "", + "measurements": [ + { + "time": 0.0100, + "cpu": 0.0100, + "real_time": 100, + "real_time_other": 101, + "cpu_time": 100, + "cpu_time_other": 101, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentFaster', - 'label': '', - 'measurements': [{'time': -0.1000, 'cpu': -0.1000, - 'real_time': 100, 'real_time_other': 90, - 'cpu_time': 100, 'cpu_time_other': 90}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentFaster", + "label": "", + "measurements": [ + { + "time": -0.1000, + "cpu": -0.1000, + "real_time": 100, + "real_time_other": 90, + "cpu_time": 100, + "cpu_time_other": 90, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentSlower', - 'label': '', - 'measurements': [{'time': 0.1000, 'cpu': 0.1000, - 'real_time': 100, 'real_time_other': 110, - 'cpu_time': 100, 'cpu_time_other': 110}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentSlower", + "label": "", + "measurements": [ + { + "time": 0.1000, + "cpu": 0.1000, + "real_time": 100, + "real_time_other": 110, + "cpu_time": 100, + "cpu_time_other": 110, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_100xSlower', - 'label': '', - 'measurements': [{'time': 99.0000, 'cpu': 99.0000, - 'real_time': 100, 'real_time_other': 10000, - 'cpu_time': 100, 'cpu_time_other': 10000}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_100xSlower", + "label": "", + "measurements": [ + { + "time": 99.0000, + "cpu": 99.0000, + "real_time": 100, + "real_time_other": 10000, + "cpu_time": 100, + "cpu_time_other": 10000, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_100xFaster', - 'label': '', - 'measurements': [{'time': -0.9900, 'cpu': -0.9900, - 'real_time': 10000, 'real_time_other': 100, - 'cpu_time': 10000, 'cpu_time_other': 100}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_100xFaster", + "label": "", + "measurements": [ + { + "time": -0.9900, + "cpu": -0.9900, + "real_time": 10000, + "real_time_other": 100, + "cpu_time": 10000, + "cpu_time_other": 100, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentCPUToTime', - 'label': '', - 'measurements': [{'time': 0.1000, 'cpu': -0.1000, - 'real_time': 100, 'real_time_other': 110, - 'cpu_time': 100, 'cpu_time_other': 90}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentCPUToTime", + "label": "", + "measurements": [ + { + "time": 0.1000, + "cpu": -0.1000, + "real_time": 100, + "real_time_other": 110, + "cpu_time": 100, + "cpu_time_other": 90, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_ThirdFaster', - 
'label': '', - 'measurements': [{'time': -0.3333, 'cpu': -0.3334, - 'real_time': 100, 'real_time_other': 67, - 'cpu_time': 100, 'cpu_time_other': 67}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_ThirdFaster", + "label": "", + "measurements": [ + { + "time": -0.3333, + "cpu": -0.3334, + "real_time": 100, + "real_time_other": 67, + "cpu_time": 100, + "cpu_time_other": 67, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_NotBadTimeUnit', - 'label': '', - 'measurements': [{'time': -0.9000, 'cpu': 0.2000, - 'real_time': 0.4, 'real_time_other': 0.04, - 'cpu_time': 0.5, 'cpu_time_other': 0.6}], - 'time_unit': 's', - 'utest': {} + "name": "BM_NotBadTimeUnit", + "label": "", + "measurements": [ + { + "time": -0.9000, + "cpu": 0.2000, + "real_time": 0.4, + "real_time_other": 0.04, + "cpu_time": 0.5, + "cpu_time_other": 0.6, + } + ], + "time_unit": "s", + "utest": {}, }, { - 'name': 'BM_hasLabel', - 'label': 'a label', - 'measurements': [{'time': 0.0000, 'cpu': 0.0000, - 'real_time': 1, 'real_time_other': 1, - 'cpu_time': 1, 'cpu_time_other': 1}], - 'time_unit': 's', - 'utest': {} + "name": "BM_hasLabel", + "label": "a label", + "measurements": [ + { + "time": 0.0000, + "cpu": 0.0000, + "real_time": 1, + "real_time_other": 1, + "cpu_time": 1, + "cpu_time_other": 1, + } + ], + "time_unit": "s", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'label': '', - 'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06, - 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07, - 'time': -0.8112976497120911, 'cpu': -0.7778551721181174}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', 'utest': {} + "name": "OVERALL_GEOMEAN", + "label": "", + "measurements": [ + { + "real_time": 3.1622776601683826e-06, + "cpu_time": 3.2130844755623912e-06, + "real_time_other": 1.9768988699420897e-07, + "cpu_time_other": 2.397447755209533e-07, + "time": -0.8112976497120911, + "cpu": -0.7778551721181174, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['label'], expected['label']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["label"], expected["label"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) @@ -618,12 +832,12 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): def setUpClass(cls): def load_result(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test2_run.json') - with open(testOutput, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput = os.path.join(testInputs, "test2_run.json") + with open(testOutput, "r") as f: json = json.load(f) return json @@ -634,65 +848,108 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], - ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], - ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], - 
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], - ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0'] + [".", "-0.5000", "-0.5000", "10", "5", "10", "5"], + ["./4", "-0.5000", "-0.5000", "40", "20", "40", "20"], + ["Prefix/.", "-0.5000", "-0.5000", "20", "10", "20", "10"], + ["Prefix/./3", "-0.5000", "-0.5000", "30", "15", "30", "15"], + ["OVERALL_GEOMEAN", "-0.5000", "-0.5000", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, use_color=False) + self.json_diff_report, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'.', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}], - 'time_unit': 'ns', - 'utest': {} + "name": ".", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 10, + "real_time_other": 5, + "cpu_time": 10, + "cpu_time_other": 5, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'./4', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}], - 'time_unit': 'ns', - 'utest': {}, + "name": "./4", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 40, + "real_time_other": 20, + "cpu_time": 40, + "cpu_time_other": 20, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'Prefix/.', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}], - 'time_unit': 'ns', - 'utest': {} + "name": "Prefix/.", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 20, + "real_time_other": 10, + "cpu_time": 20, + "cpu_time_other": 10, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'Prefix/./3', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}], - 'time_unit': 'ns', - 'utest': {} + "name": "Prefix/./3", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 30, + "real_time_other": 15, + "cpu_time": 30, + "cpu_time_other": 15, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08, - 'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08, - 'time': -0.5000000000000009, 'cpu': -0.5000000000000009}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 2.213363839400641e-08, + "cpu_time": 2.213363839400641e-08, + "real_time_other": 1.1066819197003185e-08, + "cpu_time_other": 1.1066819197003185e-08, + "time": -0.5000000000000009, + "cpu": -0.5000000000000009, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], 
expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) @@ -702,424 +959,489 @@ class TestReportDifferenceWithUTest(unittest.TestCase): def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test3_run0.json") + testOutput2 = os.path.join(testInputs, "test3_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"], + ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report_pretty_printing_aggregates_only(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - 
['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, + include_aggregates_only=True, + utest=True, + utest_alpha=0.05, + use_color=False, + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'BM_One', - 'measurements': [ - {'time': -0.1, - 'cpu': 0.1, - 'real_time': 10, - 'real_time_other': 9, - 'cpu_time': 100, - 'cpu_time_other': 110} + "name": "BM_One", + "measurements": [ + { + "time": -0.1, + "cpu": 0.1, + "real_time": 10, + "real_time_other": 9, + "cpu_time": 100, + "cpu_time_other": 110, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': u'BM_Two', - 'measurements': [ - {'time': 0.1111111111111111, - 'cpu': -0.011111111111111112, - 'real_time': 9, - 'real_time_other': 10, - 'cpu_time': 90, - 'cpu_time_other': 89}, - {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, - 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + "name": "BM_Two", + "measurements": [ + { + "time": 0.1111111111111111, + "cpu": -0.011111111111111112, + "real_time": 9, + "real_time_other": 10, + "cpu_time": 90, + "cpu_time_other": 89, + }, + { + "time": -0.125, + "cpu": -0.16279069767441862, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 86, + "cpu_time_other": 72, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.6666666666666666, + "time_pvalue": 1.0, + }, }, { - 'name': u'short', - 'measurements': [ - {'time': -0.125, - 'cpu': -0.0625, - 'real_time': 8, - 'real_time_other': 7, - 'cpu_time': 80, - 'cpu_time_other': 75}, - {'time': -0.4325, - 'cpu': -0.13506493506493514, - 'real_time': 8, - 'real_time_other': 4.54, - 'cpu_time': 77, - 'cpu_time_other': 66.6} + "name": "short", + "measurements": [ + { + "time": -0.125, + 
"cpu": -0.0625, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 80, + "cpu_time_other": 75, + }, + { + "time": -0.4325, + "cpu": -0.13506493506493514, + "real_time": 8, + "real_time_other": 4.54, + "cpu_time": 77, + "cpu_time_other": 66.6, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.2, + "time_pvalue": 0.7670968684102772, + }, }, { - 'name': u'medium', - 'measurements': [ - {'time': -0.375, - 'cpu': -0.3375, - 'real_time': 8, - 'real_time_other': 5, - 'cpu_time': 80, - 'cpu_time_other': 53} + "name": "medium", + "measurements": [ + { + "time": -0.375, + "cpu": -0.3375, + "real_time": 8, + "real_time_other": 5, + "cpu_time": 80, + "cpu_time_other": 53, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, - 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, - 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 8.48528137423858e-09, + "cpu_time": 8.441336246629233e-08, + "real_time_other": 2.2405267593145244e-08, + "cpu_time_other": 2.5453661413660466e-08, + "time": 1.6404861082353634, + "cpu": -0.6984640740519662, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( - unittest.TestCase): + unittest.TestCase +): @classmethod def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test3_run0.json") + testOutput2 = os.path.join(testInputs, "test3_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 
'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"], + ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, - utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'BM_One', - 'measurements': [ - {'time': -0.1, - 'cpu': 0.1, - 'real_time': 10, - 'real_time_other': 9, - 'cpu_time': 100, - 'cpu_time_other': 110} + "name": "BM_One", + "measurements": [ + { + "time": -0.1, + "cpu": 0.1, + "real_time": 10, + "real_time_other": 9, + "cpu_time": 100, + "cpu_time_other": 110, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': u'BM_Two', - 'measurements': [ - {'time': 0.1111111111111111, - 'cpu': -0.011111111111111112, - 'real_time': 9, - 'real_time_other': 10, - 'cpu_time': 90, - 'cpu_time_other': 89}, - {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, - 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + "name": "BM_Two", + "measurements": [ + { + "time": 0.1111111111111111, + "cpu": -0.011111111111111112, + "real_time": 9, + "real_time_other": 10, + "cpu_time": 90, + "cpu_time_other": 89, + }, + { + "time": -0.125, + "cpu": -0.16279069767441862, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 86, + "cpu_time_other": 72, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.6666666666666666, + "time_pvalue": 1.0, + }, }, { - 'name': u'short', - 'measurements': [ - {'time': -0.125, - 'cpu': -0.0625, - 'real_time': 8, - 'real_time_other': 7, - 'cpu_time': 80, - 'cpu_time_other': 75}, - {'time': -0.4325, - 'cpu': -0.13506493506493514, - 'real_time': 8, - 'real_time_other': 4.54, - 'cpu_time': 77, - 'cpu_time_other': 66.6} + "name": "short", + "measurements": [ + { + "time": -0.125, + "cpu": 
-0.0625, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 80, + "cpu_time_other": 75, + }, + { + "time": -0.4325, + "cpu": -0.13506493506493514, + "real_time": 8, + "real_time_other": 4.54, + "cpu_time": 77, + "cpu_time_other": 66.6, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.2, + "time_pvalue": 0.7670968684102772, + }, }, { - 'name': u'medium', - 'measurements': [ - {'real_time_other': 5, - 'cpu_time': 80, - 'time': -0.375, - 'real_time': 8, - 'cpu_time_other': 53, - 'cpu': -0.3375 - } + "name": "medium", + "measurements": [ + { + "real_time_other": 5, + "cpu_time": 80, + "time": -0.375, + "real_time": 8, + "cpu_time_other": 53, + "cpu": -0.3375, + } ], - 'utest': {}, - 'time_unit': u'ns', - 'aggregate_name': '' + "utest": {}, + "time_unit": "ns", + "aggregate_name": "", }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, - 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, - 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 8.48528137423858e-09, + "cpu_time": 8.441336246629233e-08, + "real_time_other": 2.2405267593145244e-08, + "cpu_time_other": 2.5453661413660466e-08, + "time": 1.6404861082353634, + "cpu": -0.6984640740519662, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) -class TestReportDifferenceForPercentageAggregates( - unittest.TestCase): +class TestReportDifferenceForPercentageAggregates(unittest.TestCase): @classmethod def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test4_run0.json') - testOutput2 = os.path.join(testInputs, 'test4_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test4_run0.json") + testOutput2 = os.path.join(testInputs, "test4_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): - expect_lines = [ - ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0'] - ] + expect_lines = [["whocares", "-0.5000", "+0.5000", "0", "0", "0", "0"]] output_lines_with_header = print_difference_report( - self.json_diff_report, - utest=True, utest_alpha=0.05, use_color=False) + 
-class TestReportDifferenceForPercentageAggregates(
-        unittest.TestCase):
+class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         def load_results():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput1 = os.path.join(testInputs, 'test4_run0.json')
-            testOutput2 = os.path.join(testInputs, 'test4_run1.json')
-            with open(testOutput1, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test4_run0.json")
+            testOutput2 = os.path.join(testInputs, "test4_run1.json")
+            with open(testOutput1, "r") as f:
                 json1 = json.load(f)
-            with open(testOutput2, 'r') as f:
+            with open(testOutput2, "r") as f:
                 json2 = json.load(f)
             return json1, json2

         json1, json2 = load_results()
-        cls.json_diff_report = get_difference_report(
-            json1, json2, utest=True)
+        cls.json_diff_report = get_difference_report(json1, json2, utest=True)

     def test_json_diff_report_pretty_printing(self):
-        expect_lines = [
-            ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0']
-        ]
+        expect_lines = [["whocares", "-0.5000", "+0.5000", "0", "0", "0", "0"]]
         output_lines_with_header = print_difference_report(
-            self.json_diff_report,
-            utest=True, utest_alpha=0.05, use_color=False)
+            self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False
+        )
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in range(0, len(output_lines)):
-            parts = [x for x in output_lines[i].split(' ') if x]
+            parts = [x for x in output_lines[i].split(" ") if x]
             self.assertEqual(expect_lines[i], parts)

     def test_json_diff_report(self):
         expected_output = [
             {
-                'name': u'whocares',
-                'measurements': [
-                    {'time': -0.5,
-                     'cpu': 0.5,
-                     'real_time': 0.01,
-                     'real_time_other': 0.005,
-                     'cpu_time': 0.10,
-                     'cpu_time_other': 0.15}
+                "name": "whocares",
+                "measurements": [
+                    {
+                        "time": -0.5,
+                        "cpu": 0.5,
+                        "real_time": 0.01,
+                        "real_time_other": 0.005,
+                        "cpu_time": 0.10,
+                        "cpu_time_other": 0.15,
+                    }
                 ],
-                'time_unit': 'ns',
-                'utest': {}
+                "time_unit": "ns",
+                "utest": {},
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
-        for out, expected in zip(
-                self.json_diff_report, expected_output):
-            self.assertEqual(out['name'], expected['name'])
-            self.assertEqual(out['time_unit'], expected['time_unit'])
+        for out, expected in zip(self.json_diff_report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
             assert_utest(self, out, expected)
             assert_measurements(self, out, expected)
@@ -1129,12 +1451,12 @@ class TestReportSorting(unittest.TestCase):
     def setUpClass(cls):
         def load_result():
             import json
+
             testInputs = os.path.join(
-                os.path.dirname(
-                    os.path.realpath(__file__)),
-                'Inputs')
-            testOutput = os.path.join(testInputs, 'test4_run.json')
-            with open(testOutput, 'r') as f:
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput = os.path.join(testInputs, "test4_run.json")
+            with open(testOutput, "r") as f:
                 json = json.load(f)
             return json
@@ -1155,45 +1477,141 @@ class TestReportSorting(unittest.TestCase):
             "91 family 1 instance 0 aggregate",
             "90 family 1 instance 1 repetition 0",
             "89 family 1 instance 1 repetition 1",
-            "88 family 1 instance 1 aggregate"
+            "88 family 1 instance 1 aggregate",
         ]

-        for n in range(len(self.json['benchmarks']) ** 2):
-            random.shuffle(self.json['benchmarks'])
+        for n in range(len(self.json["benchmarks"]) ** 2):
+            random.shuffle(self.json["benchmarks"])
             sorted_benchmarks = util.sort_benchmark_results(self.json)[
-                'benchmarks']
+                "benchmarks"
+            ]
             self.assertEqual(len(expected_names), len(sorted_benchmarks))
             for out, expected in zip(sorted_benchmarks, expected_names):
-                self.assertEqual(out['name'], expected)
+                self.assertEqual(out["name"], expected)
+
+
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
+    unittest.TestCase
+):
+    @classmethod
+    def setUpClass(cls):
+        def load_results():
+            import json
+
+            testInputs = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)), "Inputs"
+            )
+            testOutput1 = os.path.join(testInputs, "test5_run0.json")
+            testOutput2 = os.path.join(testInputs, "test5_run1.json")
+            with open(testOutput1, "r") as f:
+                json1 = json.load(f)
+                json1["benchmarks"] = [
+                    json1["benchmarks"][0] for i in range(1000)
+                ]
+            with open(testOutput2, "r") as f:
+                json2 = json.load(f)
+                json2["benchmarks"] = [
+                    json2["benchmarks"][0] for i in range(1000)
+                ]
+            return json1, json2
+
+        json1, json2 = load_results()
+        cls.json_diff_report = get_difference_report(json1, json2, utest=True)
+
+    def test_json_diff_report_pretty_printing(self):
+        expect_line = [
+            "BM_ManyRepetitions_pvalue",
+            "0.0000",
+            "0.0000",
+            "U",
+            "Test,",
+            "Repetitions:",
+            "1000",
+            "vs",
+            "1000",
+        ]
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False
+        )
+        output_lines = output_lines_with_header[2:]
+        found = False
+        for i in range(0, len(output_lines)):
+            parts = [x for x in output_lines[i].split(" ") if x]
+            found = expect_line == parts
+            if found:
+                break
+        self.assertTrue(found)
+
+    def test_json_diff_report(self):
+        expected_output = [
+            {
+                "name": "BM_ManyRepetitions",
+                "label": "",
+                "time_unit": "s",
+                "run_type": "",
+                "aggregate_name": "",
+                "utest": {
+                    "have_optimal_repetitions": True,
+                    "cpu_pvalue": 0.0,
+                    "time_pvalue": 0.0,
+                    "nr_of_repetitions": 1000,
+                    "nr_of_repetitions_other": 1000,
+                },
+            },
+            {
+                "name": "OVERALL_GEOMEAN",
+                "label": "",
+                "measurements": [
+                    {
+                        "real_time": 1.0,
+                        "cpu_time": 1000.000000000069,
+                        "real_time_other": 1000.000000000069,
+                        "cpu_time_other": 1.0,
+                        "time": 999.000000000069,
+                        "cpu": -0.9990000000000001,
+                    }
+                ],
+                "time_unit": "s",
+                "run_type": "aggregate",
+                "aggregate_name": "geomean",
+                "utest": {},
+            },
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(self.json_diff_report, expected_output):
+            self.assertEqual(out["name"], expected["name"])
+            self.assertEqual(out["time_unit"], expected["time_unit"])
+            assert_utest(self, out, expected)

 def assert_utest(unittest_instance, lhs, rhs):
-    if lhs['utest']:
+    if lhs["utest"]:
         unittest_instance.assertAlmostEqual(
-            lhs['utest']['cpu_pvalue'],
-            rhs['utest']['cpu_pvalue'])
+            lhs["utest"]["cpu_pvalue"], rhs["utest"]["cpu_pvalue"]
+        )
         unittest_instance.assertAlmostEqual(
-            lhs['utest']['time_pvalue'],
-            rhs['utest']['time_pvalue'])
+            lhs["utest"]["time_pvalue"], rhs["utest"]["time_pvalue"]
+        )
         unittest_instance.assertEqual(
-            lhs['utest']['have_optimal_repetitions'],
-            rhs['utest']['have_optimal_repetitions'])
+            lhs["utest"]["have_optimal_repetitions"],
+            rhs["utest"]["have_optimal_repetitions"],
+        )
     else:
         # lhs is empty. assert if rhs is not.
-        unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
+        unittest_instance.assertEqual(lhs["utest"], rhs["utest"])


 def assert_measurements(unittest_instance, lhs, rhs):
-    for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
-        unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
-        unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
+    for m1, m2 in zip(lhs["measurements"], rhs["measurements"]):
+        unittest_instance.assertEqual(m1["real_time"], m2["real_time"])
+        unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"])
         # m1['time'] and m1['cpu'] hold values which are being calculated,
         # and therefore we must use almost-equal pattern.
-        unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
-        unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
+        unittest_instance.assertAlmostEqual(m1["time"], m2["time"], places=4)
+        unittest_instance.assertAlmostEqual(m1["cpu"], m2["cpu"], places=4)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()

 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
diff --git a/contrib/restricted/google/benchmark/tools/compare/gbench/util.py b/contrib/restricted/google/benchmark/tools/compare/gbench/util.py
index 5e79da8f01..1119a1a2ca 100644
--- a/contrib/restricted/google/benchmark/tools/compare/gbench/util.py
+++ b/contrib/restricted/google/benchmark/tools/compare/gbench/util.py
@@ -1,5 +1,5 @@
-"""util.py - General utilities for running, loading, and processing benchmarks
-"""
+"""util.py - General utilities for running, loading, and processing benchmarks"""
+
 import json
 import os
 import re
@@ -7,13 +7,12 @@ import subprocess
 import sys
 import tempfile

-
 # Input file type enumeration
 IT_Invalid = 0
 IT_JSON = 1
 IT_Executable = 2

-_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+_num_magic_bytes = 2 if sys.platform.startswith("win") else 4


 def is_executable_file(filename):
@@ -24,21 +23,21 @@ def is_executable_file(filename):
     """
     if not os.path.isfile(filename):
         return False
-    with open(filename, mode='rb') as f:
+    with open(filename, mode="rb") as f:
         magic_bytes = f.read(_num_magic_bytes)
-    if sys.platform == 'darwin':
+    if sys.platform == "darwin":
         return magic_bytes in [
-            b'\xfe\xed\xfa\xce',  # MH_MAGIC
-            b'\xce\xfa\xed\xfe',  # MH_CIGAM
-            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
-            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
-            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
-            b'\xbe\xba\xfe\xca'  # FAT_CIGAM
+            b"\xfe\xed\xfa\xce",  # MH_MAGIC
+            b"\xce\xfa\xed\xfe",  # MH_CIGAM
+            b"\xfe\xed\xfa\xcf",  # MH_MAGIC_64
+            b"\xcf\xfa\xed\xfe",  # MH_CIGAM_64
+            b"\xca\xfe\xba\xbe",  # FAT_MAGIC
+            b"\xbe\xba\xfe\xca",  # FAT_CIGAM
         ]
-    elif sys.platform.startswith('win'):
-        return magic_bytes == b'MZ'
+    elif sys.platform.startswith("win"):
+        return magic_bytes == b"MZ"
     else:
-        return magic_bytes == b'\x7FELF'
+        return magic_bytes == b"\x7fELF"


 def is_json_file(filename):
@@ -47,7 +46,7 @@ def is_json_file(filename):
     'False' otherwise.
     """
     try:
-        with open(filename, 'r') as f:
+        with open(filename, "r") as f:
             json.load(f)
             return True
     except BaseException:
@@ -72,7 +71,10 @@ def classify_input_file(filename):
     elif is_json_file(filename):
         ftype = IT_JSON
     else:
-        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
+        err_msg = (
+            "'%s' does not name a valid benchmark executable or JSON file"
+            % filename
+        )
     return ftype, err_msg
@@ -95,11 +97,11 @@ def find_benchmark_flag(prefix, benchmark_flags):
     if it is found return the arg it specifies. If specified more than once
     the last value is returned. If the flag is not found None is returned.
     """
-    assert prefix.startswith('--') and prefix.endswith('=')
+    assert prefix.startswith("--") and prefix.endswith("=")
     result = None
     for f in benchmark_flags:
         if f.startswith(prefix):
-            result = f[len(prefix):]
+            result = f[len(prefix) :]
     return result
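As the docstring above states, find_benchmark_flag is last-value-wins and returns None for a missing flag. A small usage sketch, assuming the function is imported from the gbench.util module shown in this diff and run from the tools/compare directory:

from gbench.util import find_benchmark_flag

flags = [
    "--benchmark_out=first.json",
    "--benchmark_out=second.json",  # repeated flag: the last value wins
]
print(find_benchmark_flag("--benchmark_out=", flags))     # second.json
print(find_benchmark_flag("--benchmark_filter=", flags))  # None (not present)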
""" - assert prefix.startswith('--') and prefix.endswith('=') + assert prefix.startswith("--") and prefix.endswith("=") return [f for f in benchmark_flags if not f.startswith(prefix)] @@ -124,36 +126,61 @@ def load_benchmark_results(fname, benchmark_filter): REQUIRES: 'fname' names a file containing JSON benchmark output. """ + def benchmark_wanted(benchmark): if benchmark_filter is None: return True - name = benchmark.get('run_name', None) or benchmark['name'] - if re.search(benchmark_filter, name): - return True - return False + name = benchmark.get("run_name", None) or benchmark["name"] + return re.search(benchmark_filter, name) is not None - with open(fname, 'r') as f: + with open(fname, "r") as f: results = json.load(f) - if 'benchmarks' in results: - results['benchmarks'] = list(filter(benchmark_wanted, - results['benchmarks'])) + if "context" in results: + if "json_schema_version" in results["context"]: + json_schema_version = results["context"]["json_schema_version"] + if json_schema_version != 1: + print( + "In %s, got unnsupported JSON schema version: %i, expected 1" + % (fname, json_schema_version) + ) + sys.exit(1) + if "benchmarks" in results: + results["benchmarks"] = list( + filter(benchmark_wanted, results["benchmarks"]) + ) return results def sort_benchmark_results(result): - benchmarks = result['benchmarks'] + benchmarks = result["benchmarks"] # From inner key to the outer key! benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["repetition_index"] + if "repetition_index" in benchmark + else -1, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0) + benchmarks, + key=lambda benchmark: 1 + if "run_type" in benchmark and benchmark["run_type"] == "aggregate" + else 0, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["per_family_instance_index"] + if "per_family_instance_index" in benchmark + else -1, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["family_index"] + if "family_index" in benchmark + else -1, + ) - result['benchmarks'] = benchmarks + result["benchmarks"] = benchmarks return result @@ -164,21 +191,21 @@ def run_benchmark(exe_name, benchmark_flags): real time console output. 
@@ -164,21 +191,21 @@ def run_benchmark(exe_name, benchmark_flags):
     real time console output.
     RETURNS: A JSON object representing the benchmark output
     """
-    output_name = find_benchmark_flag('--benchmark_out=',
-                                      benchmark_flags)
+    output_name = find_benchmark_flag("--benchmark_out=", benchmark_flags)
     is_temp_output = False
     if output_name is None:
         is_temp_output = True
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + \
-            ['--benchmark_out=%s' % output_name]
+        benchmark_flags = list(benchmark_flags) + [
+            "--benchmark_out=%s" % output_name
+        ]

     cmd = [exe_name] + benchmark_flags
-    print("RUNNING: %s" % ' '.join(cmd))
+    print("RUNNING: %s" % " ".join(cmd))
     exitCode = subprocess.call(cmd)
     if exitCode != 0:
-        print('TEST FAILED...')
+        print("TEST FAILED...")
         sys.exit(exitCode)
     json_res = load_benchmark_results(output_name, None)
     if is_temp_output:
@@ -195,9 +222,10 @@ def run_or_load_benchmark(filename, benchmark_flags):
     """
     ftype = check_input_file(filename)
     if ftype == IT_JSON:
-        benchmark_filter = find_benchmark_flag('--benchmark_filter=',
-                                               benchmark_flags)
+        benchmark_filter = find_benchmark_flag(
+            "--benchmark_filter=", benchmark_flags
+        )
         return load_benchmark_results(filename, benchmark_filter)
     if ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
-    raise ValueError('Unknown file type %s' % ftype)
+    raise ValueError("Unknown file type %s" % ftype)
diff --git a/contrib/restricted/google/benchmark/tools/compare/ya.make b/contrib/restricted/google/benchmark/tools/compare/ya.make
index 46ac266649..d1005f244d 100644
--- a/contrib/restricted/google/benchmark/tools/compare/ya.make
+++ b/contrib/restricted/google/benchmark/tools/compare/ya.make
@@ -4,9 +4,9 @@ PY3_PROGRAM()

 WITHOUT_LICENSE_TEXTS()

-VERSION(1.8.3)
+VERSION(1.8.4)

-ORIGINAL_SOURCE(https://github.com/google/benchmark/archive/v1.8.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/google/benchmark/archive/v1.8.4.tar.gz)

 LICENSE(Apache-2.0)
diff --git a/contrib/restricted/google/benchmark/ya.make b/contrib/restricted/google/benchmark/ya.make
index f79be924dc..2ab39c9b34 100644
--- a/contrib/restricted/google/benchmark/ya.make
+++ b/contrib/restricted/google/benchmark/ya.make
@@ -2,9 +2,9 @@

 LIBRARY()

-VERSION(1.8.3)
+VERSION(1.8.4)

-ORIGINAL_SOURCE(https://github.com/google/benchmark/archive/v1.8.3.tar.gz)
+ORIGINAL_SOURCE(https://github.com/google/benchmark/archive/v1.8.4.tar.gz)

 LICENSE(Apache-2.0)
@@ -21,6 +21,7 @@ NO_UTIL()

 CFLAGS(
     GLOBAL -DBENCHMARK_STATIC_DEFINE
+    -DBENCHMARK_VERSION=\"v0.0.0\"
     -DHAVE_POSIX_REGEX
     -DHAVE_PTHREAD_AFFINITY
     -DHAVE_STD_REGEX
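Taken together, the util.py changes above mean a JSON input must either omit context.json_schema_version or declare version 1; any other value aborts the run. An end-to-end usage sketch under those assumptions (the file contents and benchmark name below are hypothetical, and the import assumes the script runs from the tools/compare directory):

import json
import tempfile

from gbench.util import run_or_load_benchmark

results = {
    "context": {"json_schema_version": 1},  # accepted; any other value exits
    "benchmarks": [{"name": "BM_example", "real_time": 1.0, "cpu_time": 1.0}],
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(results, f)

# Classified as IT_JSON, so the file is loaded (not executed) and the
# --benchmark_filter= value is applied as a regex over benchmark names.
loaded = run_or_load_benchmark(f.name, ["--benchmark_filter=BM_.*"])
print([b["name"] for b in loaded["benchmarks"]])  # ['BM_example']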