author     monster <[email protected]>  2022-07-07 14:41:37 +0300
committer  monster <[email protected]>  2022-07-07 14:41:37 +0300
commit     06e5c21a835c0e923506c4ff27929f34e00761c2 (patch)
tree       75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/libs/benchmark
parent     03f024c4412e3aa613bb543cf1660176320ba8f4 (diff)
fix ya.make
Diffstat (limited to 'contrib/libs/benchmark')
-rw-r--r--  contrib/libs/benchmark/AUTHORS | 63
-rw-r--r--  contrib/libs/benchmark/CONTRIBUTING.md | 58
-rw-r--r--  contrib/libs/benchmark/LICENSE | 202
-rw-r--r--  contrib/libs/benchmark/README.md | 216
-rw-r--r--  contrib/libs/benchmark/include/benchmark/benchmark.h | 1769
-rw-r--r--  contrib/libs/benchmark/src/arraysize.h | 33
-rw-r--r--  contrib/libs/benchmark/src/benchmark.cc | 626
-rw-r--r--  contrib/libs/benchmark/src/benchmark_api_internal.cc | 112
-rw-r--r--  contrib/libs/benchmark/src/benchmark_api_internal.h | 84
-rw-r--r--  contrib/libs/benchmark/src/benchmark_name.cc | 58
-rw-r--r--  contrib/libs/benchmark/src/benchmark_register.cc | 492
-rw-r--r--  contrib/libs/benchmark/src/benchmark_register.h | 108
-rw-r--r--  contrib/libs/benchmark/src/benchmark_runner.cc | 361
-rw-r--r--  contrib/libs/benchmark/src/benchmark_runner.h | 104
-rw-r--r--  contrib/libs/benchmark/src/check.h | 90
-rw-r--r--  contrib/libs/benchmark/src/colorprint.cc | 188
-rw-r--r--  contrib/libs/benchmark/src/colorprint.h | 33
-rw-r--r--  contrib/libs/benchmark/src/commandlineflags.cc | 285
-rw-r--r--  contrib/libs/benchmark/src/commandlineflags.h | 116
-rw-r--r--  contrib/libs/benchmark/src/complexity.cc | 244
-rw-r--r--  contrib/libs/benchmark/src/complexity.h | 55
-rw-r--r--  contrib/libs/benchmark/src/console_reporter.cc | 190
-rw-r--r--  contrib/libs/benchmark/src/counter.cc | 80
-rw-r--r--  contrib/libs/benchmark/src/counter.h | 32
-rw-r--r--  contrib/libs/benchmark/src/csv_reporter.cc | 158
-rw-r--r--  contrib/libs/benchmark/src/cycleclock.h | 225
-rw-r--r--  contrib/libs/benchmark/src/internal_macros.h | 102
-rw-r--r--  contrib/libs/benchmark/src/json_reporter.cc | 323
-rw-r--r--  contrib/libs/benchmark/src/log.h | 74
-rw-r--r--  contrib/libs/benchmark/src/mutex.h | 155
-rw-r--r--  contrib/libs/benchmark/src/perf_counters.cc | 132
-rw-r--r--  contrib/libs/benchmark/src/perf_counters.h | 172
-rw-r--r--  contrib/libs/benchmark/src/re.h | 158
-rw-r--r--  contrib/libs/benchmark/src/reporter.cc | 114
-rw-r--r--  contrib/libs/benchmark/src/sleep.cc | 66
-rw-r--r--  contrib/libs/benchmark/src/sleep.h | 15
-rw-r--r--  contrib/libs/benchmark/src/statistics.cc | 208
-rw-r--r--  contrib/libs/benchmark/src/statistics.h | 38
-rw-r--r--  contrib/libs/benchmark/src/string_util.cc | 265
-rw-r--r--  contrib/libs/benchmark/src/string_util.h | 65
-rw-r--r--  contrib/libs/benchmark/src/sysinfo.cc | 725
-rw-r--r--  contrib/libs/benchmark/src/thread_manager.h | 63
-rw-r--r--  contrib/libs/benchmark/src/thread_timer.h | 86
-rw-r--r--  contrib/libs/benchmark/src/timers.cc | 259
-rw-r--r--  contrib/libs/benchmark/src/timers.h | 48
45 files changed, 9050 insertions(+), 0 deletions(-)
diff --git a/contrib/libs/benchmark/AUTHORS b/contrib/libs/benchmark/AUTHORS
new file mode 100644
index 00000000000..2b8072e095b
--- /dev/null
+++ b/contrib/libs/benchmark/AUTHORS
@@ -0,0 +1,63 @@
+# This is the official list of benchmark authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+#
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+#
+# Please keep the list sorted.
+
+Albert Pretorius <[email protected]>
+Alex Steele <[email protected]>
+Andriy Berestovskyy <[email protected]>
+Arne Beer <[email protected]>
+Carto
+Christian Wassermann <[email protected]>
+Christopher Seymour <[email protected]>
+Colin Braley <[email protected]>
+Daniel Harvey <[email protected]>
+David Coeurjolly <[email protected]>
+Deniz Evrenci <[email protected]>
+Dirac Research
+Dominik Czarnota <[email protected]>
+Dominik Korman <[email protected]>
+Donald Aingworth <[email protected]>
+Eric Backus <[email protected]>
+Eric Fiselier <[email protected]>
+Eugene Zhuk <[email protected]>
+Evgeny Safronov <[email protected]>
+Federico Ficarelli <[email protected]>
+Felix Homann <[email protected]>
+Gergő Szitár <[email protected]>
+Google Inc.
+International Business Machines Corporation
+Ismael Jimenez Martinez <[email protected]>
+Jern-Kuan Leong <[email protected]>
+JianXiong Zhou <[email protected]>
+Joao Paulo Magalhaes <[email protected]>
+Jordan Williams <[email protected]>
+Jussi Knuuttila <[email protected]>
+Kaito Udagawa <[email protected]>
+Kishan Kumar <[email protected]>
+Matt Clarkson <[email protected]>
+Maxim Vafin <[email protected]>
+MongoDB Inc.
+Nick Hutchinson <[email protected]>
+Norman Heino <[email protected]>
+Oleksandr Sochka <[email protected]>
+Ori Livneh <[email protected]>
+Paul Redmond <[email protected]>
+Radoslav Yovchev <[email protected]>
+Roman Lebedev <[email protected]>
+Sayan Bhattacharjee <[email protected]>
+Shuo Chen <[email protected]>
+Staffan Tjernstrom <[email protected]>
+Steinar H. Gunderson <[email protected]>
+Stripe, Inc.
+Tobias Schmidt <[email protected]>
+Yixuan Qiu <[email protected]>
+Yusuke Suzuki <[email protected]>
+Zbigniew Skowron <[email protected]>
+Min-Yih Hsu <[email protected]>
diff --git a/contrib/libs/benchmark/CONTRIBUTING.md b/contrib/libs/benchmark/CONTRIBUTING.md
new file mode 100644
index 00000000000..43de4c9d470
--- /dev/null
+++ b/contrib/libs/benchmark/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+    messages can be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/contrib/libs/benchmark/LICENSE b/contrib/libs/benchmark/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/contrib/libs/benchmark/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/libs/benchmark/README.md b/contrib/libs/benchmark/README.md
new file mode 100644
index 00000000000..7b81d960fc1
--- /dev/null
+++ b/contrib/libs/benchmark/README.md
@@ -0,0 +1,216 @@
+# Benchmark
+
+[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
+[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml)
+[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
+[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
+
+[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
+[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
+
+
+A library to benchmark code snippets, similar to unit tests. Example:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_SomeFunction(benchmark::State& state) {
+ // Perform setup here
+ for (auto _ : state) {
+ // This code gets timed
+ SomeFunction();
+ }
+}
+// Register the function as a benchmark
+BENCHMARK(BM_SomeFunction);
+// Run the benchmark
+BENCHMARK_MAIN();
+```
+
+## Getting Started
+
+To get started, see [Requirements](#requirements) and
+[Installation](#installation). See [Usage](#usage) for a full example and the
+[User Guide](docs/user_guide.md) for a more comprehensive feature overview.
+
+It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
+
+## Resources
+
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
+
+IRC channels:
+* [libera](https://libera.chat) #benchmark
+
+[Additional Tooling Documentation](docs/tools.md)
+
+[Assembly Testing Documentation](docs/AssemblyTests.md)
+
+## Requirements
+
+The library can be used with C++03. However, it requires C++11 to build,
+including compiler and standard library support.
+
+The following minimum versions are required to build the library:
+
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 14 2015
+* Intel 2015 Update 1
+
+See [Platform-Specific Build Instructions](docs/platform_specific_build_instructions.md).
+
+## Installation
+
+This describes the installation process using cmake. As prerequisites, you'll
+need git and cmake installed.
+
+_See [dependencies.md](docs/dependencies.md) for more details regarding supported
+versions of build tools._
+
+```bash
+# Check out the library.
+$ git clone https://github.com/google/benchmark.git
+# Go to the library root directory
+$ cd benchmark
+# Make a build directory to place the build output.
+$ cmake -E make_directory "build"
+# Generate build system files with cmake, and download any dependencies.
+$ cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../
+# or, starting with CMake 3.13, use a simpler form:
+# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
+# Build the library.
+$ cmake --build "build" --config Release
+```
+This builds the `benchmark` and `benchmark_main` libraries and tests.
+On a Unix system, the build directory should now look something like this:
+
+```
+/benchmark
+ /build
+ /src
+ /libbenchmark.a
+ /libbenchmark_main.a
+ /test
+ ...
+```
+
+Next, you can run the tests to check the build.
+
+```bash
+$ cmake -E chdir "build" ctest --build-config Release
+```
+
+If you want to install the library globally, also run:
+
+```
+sudo cmake --build "build" --config Release --target install
+```
+
+Note that Google Benchmark requires Google Test to build and run the tests. This
+dependency can be provided in two ways:
+
+* Check out the Google Test sources into `benchmark/googletest`.
+* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
+ configuration as above, the library will automatically download and build
+ any required dependencies.
+
+If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
+to `CMAKE_ARGS`.
+
+### Debug vs Release
+
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, add
+`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
+above. The use of `--config Release` in build commands is needed to properly
+support multi-configuration tools (like Visual Studio for example) and can be
+skipped for other build systems (like Makefile).
+
+To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
+generating the build system files.
+
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
+
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+
+### Stable and Experimental Library Versions
+
+The main branch contains the latest stable version of the benchmarking library;
+its API can be considered largely stable, with source-breaking changes being
+made only upon the release of a new major version.
+
+Newer, experimental, features are implemented and tested on the
+[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
+to use, test, and provide feedback on the new features are encouraged to try
+this branch. However, this branch provides no stability guarantees and reserves
+the right to change and break the API at any time.
+
+## Usage
+
+### Basic usage
+
+Define a function that executes the code to measure, register it as a benchmark
+function using the `BENCHMARK` macro, and ensure an appropriate `main` function
+is available:
+
+```c++
+#include <benchmark/benchmark.h>
+
+static void BM_StringCreation(benchmark::State& state) {
+ for (auto _ : state)
+ std::string empty_string;
+}
+// Register the function as a benchmark
+BENCHMARK(BM_StringCreation);
+
+// Define another benchmark
+static void BM_StringCopy(benchmark::State& state) {
+ std::string x = "hello";
+ for (auto _ : state)
+ std::string copy(x);
+}
+BENCHMARK(BM_StringCopy);
+
+BENCHMARK_MAIN();
+```
+
+To run the benchmark, compile and link against the `benchmark` library
+(libbenchmark.a/.so). If you followed the build steps above, this library will
+be under the build directory you created.
+
+```bash
+# Example on linux after running the build steps above. Assumes the
+# `benchmark` and `build` directories are under the current directory.
+$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
+ -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
+```
+
+Alternatively, link against the `benchmark_main` library and remove
+`BENCHMARK_MAIN();` above to get the same behavior.
+
+The compiled executable will run all benchmarks by default. Pass the `--help`
+flag for option information or see the [User Guide](docs/user_guide.md).
+
+### Usage with CMake
+
+If using CMake, it is recommended to link against the project-provided
+`benchmark::benchmark` and `benchmark::benchmark_main` targets using
+`target_link_libraries`.
+It is possible to use ```find_package``` to import an installed version of the
+library.
+```cmake
+find_package(benchmark REQUIRED)
+```
+Alternatively, ```add_subdirectory``` will incorporate the library directly into
+one's CMake project.
+```cmake
+add_subdirectory(benchmark)
+```
+Either way, link to the library as follows.
+```cmake
+target_link_libraries(MyTarget benchmark::benchmark)
+```
diff --git a/contrib/libs/benchmark/include/benchmark/benchmark.h b/contrib/libs/benchmark/include/benchmark/benchmark.h
new file mode 100644
index 00000000000..56d2805c77c
--- /dev/null
+++ b/contrib/libs/benchmark/include/benchmark/benchmark.h
@@ -0,0 +1,1769 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Support for registering benchmarks for functions.
+
+/* Example usage:
+// Define a function that executes the code to be measured a
+// specified number of times:
+static void BM_StringCreation(benchmark::State& state) {
+ for (auto _ : state)
+ std::string empty_string;
+}
+
+// Register the function as a benchmark
+BENCHMARK(BM_StringCreation);
+
+// Define another benchmark
+static void BM_StringCopy(benchmark::State& state) {
+ std::string x = "hello";
+ for (auto _ : state)
+ std::string copy(x);
+}
+BENCHMARK(BM_StringCopy);
+
+// Augment the main() program to invoke benchmarks if specified
+// via the --benchmark_filter command line flag. E.g.,
+// my_unittest --benchmark_filter=all
+// my_unittest --benchmark_filter=BM_StringCreation
+// my_unittest --benchmark_filter=String
+// my_unittest --benchmark_filter='Copy|Creation'
+int main(int argc, char** argv) {
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+ benchmark::Shutdown();
+ return 0;
+}
+
+// Sometimes a family of microbenchmarks can be implemented with
+// just one routine that takes an extra argument to specify which
+// one of the family of benchmarks to run. For example, the following
+// code defines a family of microbenchmarks for measuring the speed
+// of memcpy() calls of different lengths:
+
+static void BM_memcpy(benchmark::State& state) {
+ char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
+ memset(src, 'x', state.range(0));
+ for (auto _ : state)
+ memcpy(dst, src, state.range(0));
+ state.SetBytesProcessed(state.iterations() * state.range(0));
+ delete[] src; delete[] dst;
+}
+BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
+
+// The preceding code is quite repetitive, and can be replaced with the
+// following short-hand. The following invocation will pick a few
+// appropriate arguments in the specified range and will generate a
+// microbenchmark for each such argument.
+BENCHMARK(BM_memcpy)->Range(8, 8<<10);
+
+// You might have a microbenchmark that depends on two inputs. For
+// example, the following code defines a family of microbenchmarks for
+// measuring the speed of set insertion.
+static void BM_SetInsert(benchmark::State& state) {
+ set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming();
+ data = ConstructRandomSet(state.range(0));
+ state.ResumeTiming();
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert)
+ ->Args({1<<10, 128})
+ ->Args({2<<10, 128})
+ ->Args({4<<10, 128})
+ ->Args({8<<10, 128})
+ ->Args({1<<10, 512})
+ ->Args({2<<10, 512})
+ ->Args({4<<10, 512})
+ ->Args({8<<10, 512});
+
+// The preceding code is quite repetitive, and can be replaced with
+// the following short-hand. The following macro will pick a few
+// appropriate arguments in the product of the two specified ranges
+// and will generate a microbenchmark for each such pair.
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
+
+// For more complex patterns of inputs, passing a custom function
+// to Apply allows programmatic specification of an
+// arbitrary set of arguments to run the microbenchmark on.
+// The following example enumerates a dense range on
+// one parameter, and a sparse range on the second.
+static void CustomArguments(benchmark::internal::Benchmark* b) {
+ for (int i = 0; i <= 10; ++i)
+ for (int j = 32; j <= 1024*1024; j *= 8)
+ b->Args({i, j});
+}
+BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
+
+// Templated microbenchmarks work the same way:
+// Produce then consume 'size' messages 'iters' times
+// Measures throughput in the absence of multiprogramming.
+template <class Q> void BM_Sequential(benchmark::State& state) {
+ Q q;
+ typename Q::value_type v;
+ for (auto _ : state) {
+ for (int i = state.range(0); i--; )
+ q.push(v);
+ for (int e = state.range(0); e--; )
+ q.Wait(&v);
+ }
+ // actually messages, not bytes:
+ state.SetBytesProcessed(state.iterations() * state.range(0));
+}
+BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
+
+Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
+benchmark. This option overrides the `benchmark_min_time` flag.
+
+void BM_test(benchmark::State& state) {
+ ... body ...
+}
+BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
+
+In a multithreaded test, it is guaranteed that none of the threads will start
+until all have reached the loop start, and all will have finished before any
+thread exits the loop body. As such, any global setup or teardown you want to
+do can be wrapped in a check against the thread index:
+
+static void BM_MultiThreaded(benchmark::State& state) {
+ if (state.thread_index() == 0) {
+ // Setup code here.
+ }
+ for (auto _ : state) {
+ // Run the test as normal.
+ }
+ if (state.thread_index() == 0) {
+ // Teardown code here.
+ }
+}
+BENCHMARK(BM_MultiThreaded)->Threads(4);
+
+
+If a benchmark runs for a few milliseconds it may be hard to visually compare
+the measured times, since the output data is given in nanoseconds by default.
+To set the time unit manually, specify it explicitly:
+
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+*/
+
+#ifndef BENCHMARK_BENCHMARK_H_
+#define BENCHMARK_BENCHMARK_H_
+
+// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
+#define BENCHMARK_HAS_CXX11
+#endif
+
+// This _MSC_VER check should detect VS 2017 v15.3 and newer.
+#if __cplusplus >= 201703L || \
+ (defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L)
+#define BENCHMARK_HAS_CXX17
+#endif
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iosfwd>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#if defined(BENCHMARK_HAS_CXX11)
+#include <atomic>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h> // for _ReadWriteBarrier
+#endif
+
+#ifndef BENCHMARK_HAS_CXX11
+#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ TypeName& operator=(const TypeName&)
+#else
+#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ TypeName& operator=(const TypeName&) = delete
+#endif
+
+#ifdef BENCHMARK_HAS_CXX17
+#define BENCHMARK_UNUSED [[maybe_unused]]
+#elif defined(__GNUC__) || defined(__clang__)
+#define BENCHMARK_UNUSED __attribute__((unused))
+#else
+#define BENCHMARK_UNUSED
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#elif defined(_MSC_VER) && !defined(__clang__)
+#define BENCHMARK_ALWAYS_INLINE __forceinline
+#if _MSC_VER >= 1900
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#else
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+#define __func__ __FUNCTION__
+#else
+#define BENCHMARK_ALWAYS_INLINE
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+
+#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
+#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
+
+// clang-format off
+#if defined(__GNUC__) || defined(__clang__)
+#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
+#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
+#define BENCHMARK_DISABLE_DEPRECATED_WARNING \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop")
+#else
+#define BENCHMARK_BUILTIN_EXPECT(x, y) x
+#define BENCHMARK_DEPRECATED_MSG(msg)
+#define BENCHMARK_WARNING_MSG(msg) \
+ __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
+ __LINE__) ") : warning note: " msg))
+#define BENCHMARK_DISABLE_DEPRECATED_WARNING
+#define BENCHMARK_RESTORE_DEPRECATED_WARNING
+#endif
+// clang-format on
+
+#if defined(__GNUC__) && !defined(__clang__)
+#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#endif
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
+#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define BENCHMARK_UNREACHABLE() __assume(false)
+#else
+#define BENCHMARK_UNREACHABLE() ((void)0)
+#endif
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_OVERRIDE override
+#else
+#define BENCHMARK_OVERRIDE
+#endif
+
+namespace benchmark {
+class BenchmarkReporter;
+
+void Initialize(int* argc, char** argv);
+void Shutdown();
+
+// Report to stdout all arguments in 'argv' as unrecognized except the first.
+// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
+bool ReportUnrecognizedArguments(int argc, char** argv);
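+//
+// A typical main() that also rejects unknown flags might look like the
+// following minimal sketch (this mirrors what the BENCHMARK_MAIN() macro
+// provides):
+//
+//   int main(int argc, char** argv) {
+//     benchmark::Initialize(&argc, argv);
+//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
+//     benchmark::RunSpecifiedBenchmarks();
+//     benchmark::Shutdown();
+//     return 0;
+//   }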
+
+// Returns the current value of --benchmark_filter.
+std::string GetBenchmarkFilter();
+
+// Generate a list of benchmarks matching the specified --benchmark_filter flag
+// and if --benchmark_list_tests is specified return after printing the name
+// of each matching benchmark. Otherwise run each matching benchmark and
+// report the results.
+//
+// spec : Specify the benchmarks to run. If users do not specify this arg,
+//        then the value of FLAGS_benchmark_filter will be used.
+//
+// The second and third overloads use the specified 'display_reporter' and
+// 'file_reporter' respectively. 'file_reporter' will write to the file
+// specified by '--benchmark_output'. If '--benchmark_output' is not given,
+// the 'file_reporter' is ignored.
+//
+// RETURNS: The number of matching benchmarks.
+size_t RunSpecifiedBenchmarks();
+size_t RunSpecifiedBenchmarks(std::string spec);
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ std::string spec);
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter,
+ std::string spec);
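+
+// For example, to run with a custom display reporter (a sketch; 'MyReporter'
+// stands in for any BenchmarkReporter subclass you provide):
+//
+//   MyReporter reporter;
+//   benchmark::RunSpecifiedBenchmarks(&reporter);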
+
+// If a MemoryManager is registered (via RegisterMemoryManager()),
+// it can be used to collect and report allocation metrics for a run of the
+// benchmark.
+class MemoryManager {
+ public:
+ static const int64_t TombstoneValue;
+
+ struct Result {
+ Result()
+ : num_allocs(0),
+ max_bytes_used(0),
+ total_allocated_bytes(TombstoneValue),
+ net_heap_growth(TombstoneValue) {}
+
+ // The number of allocations made in total between Start and Stop.
+ int64_t num_allocs;
+
+ // The peak memory use between Start and Stop.
+ int64_t max_bytes_used;
+
+ // The total memory allocated, in bytes, between Start and Stop.
+ // Init'ed to TombstoneValue if metric not available.
+ int64_t total_allocated_bytes;
+
+ // The net changes in memory, in bytes, between Start and Stop.
+ // ie., total_allocated_bytes - total_deallocated_bytes.
+ // Init'ed to TombstoneValue if metric not available.
+ int64_t net_heap_growth;
+ };
+
+ virtual ~MemoryManager() {}
+
+ // Implement this to start recording allocation information.
+ virtual void Start() = 0;
+
+ // Implement this to stop recording and fill out the given Result structure.
+ BENCHMARK_DEPRECATED_MSG("Use Stop(Result&) instead")
+ virtual void Stop(Result* result) = 0;
+
+ // FIXME(vyng): Make this pure virtual once we've migrated current users.
+ BENCHMARK_DISABLE_DEPRECATED_WARNING
+ virtual void Stop(Result& result) { Stop(&result); }
+ BENCHMARK_RESTORE_DEPRECATED_WARNING
+};
+
+// Register a MemoryManager instance that will be used to collect and report
+// allocation measurements for benchmark runs.
+void RegisterMemoryManager(MemoryManager* memory_manager);
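+
+// A minimal sketch of hooking in a custom manager ('MyMemoryManager' is a
+// hypothetical subclass implementing Start() and Stop()):
+//
+//   MyMemoryManager mm;
+//   benchmark::RegisterMemoryManager(&mm);
+//   benchmark::RunSpecifiedBenchmarks();
+//   benchmark::RegisterMemoryManager(nullptr);  // unregister when done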
+
+// Add a key-value pair to output as part of the context stanza in the report.
+void AddCustomContext(const std::string& key, const std::string& value);
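+//
+// For example (a sketch with a made-up key/value pair):
+//   benchmark::AddCustomContext("machine", "buildbot-7");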
+
+namespace internal {
+class Benchmark;
+class BenchmarkImp;
+class BenchmarkFamilies;
+
+void UseCharPointer(char const volatile*);
+
+// Take ownership of the pointer and register the benchmark. Return the
+// registered benchmark.
+Benchmark* RegisterBenchmarkInternal(Benchmark*);
+
+// Ensure that the standard streams are properly initialized in every TU.
+int InitializeStreams();
+BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
+
+} // namespace internal
+
+#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
+ defined(__EMSCRIPTEN__)
+#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
+#endif
+
+// Force the compiler to flush pending writes to global memory. Acts as an
+// effective read/write barrier
+#ifdef BENCHMARK_HAS_CXX11
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
+ std::atomic_signal_fence(std::memory_order_acq_rel);
+}
+#endif
+
+// The DoNotOptimize(...) function can be used to prevent a value or
+// expression from being optimized away by the compiler. This function is
+// intended to add little to no overhead.
+// See: https://youtu.be/nXaxk27zwlk?t=2441
+#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ asm volatile("" : : "r,m"(value) : "memory");
+}
+
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
+#if defined(__clang__)
+ asm volatile("" : "+r,m"(value) : : "memory");
+#else
+ asm volatile("" : "+m,r"(value) : : "memory");
+#endif
+}
+
+#ifndef BENCHMARK_HAS_CXX11
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
+ asm volatile("" : : : "memory");
+}
+#endif
+#elif defined(_MSC_VER)
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+ _ReadWriteBarrier();
+}
+
+#ifndef BENCHMARK_HAS_CXX11
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
+#endif
+#else
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
+ internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
+}
+// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11.
+#endif
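+
+// Example use of DoNotOptimize() and ClobberMemory(), a sketch following the
+// pattern shown in docs/user_guide.md:
+//
+//   static void BM_VectorPush(benchmark::State& state) {
+//     for (auto _ : state) {
+//       std::vector<int> v;
+//       v.reserve(1);
+//       benchmark::DoNotOptimize(v.data());  // Allow v.data() to be clobbered.
+//       v.push_back(42);
+//       benchmark::ClobberMemory();  // Force the write of 42 to memory.
+//     }
+//   }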
+
+// This class is used for user-defined counters.
+class Counter {
+ public:
+ enum Flags {
+ kDefaults = 0,
+ // Mark the counter as a rate. It will be presented divided
+ // by the duration of the benchmark.
+ kIsRate = 1 << 0,
+ // Mark the counter as a thread-average quantity. It will be
+ // presented divided by the number of threads.
+ kAvgThreads = 1 << 1,
+ // Mark the counter as a thread-average rate. See above.
+ kAvgThreadsRate = kIsRate | kAvgThreads,
+ // Mark the counter as a constant value, valid/same for *every* iteration.
+ // When reporting, it will be *multiplied* by the iteration count.
+ kIsIterationInvariant = 1 << 2,
+ // Mark the counter as a constant rate.
+ // When reporting, it will be *multiplied* by the iteration count
+ // and then divided by the duration of the benchmark.
+ kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
+    // Mark the counter as an iteration-average quantity.
+ // It will be presented divided by the number of iterations.
+ kAvgIterations = 1 << 3,
+    // Mark the counter as an iteration-average rate. See above.
+ kAvgIterationsRate = kIsRate | kAvgIterations,
+
+ // In the end, invert the result. This is always done last!
+ kInvert = 1 << 31
+ };
+
+ enum OneK {
+ // 1'000 items per 1k
+ kIs1000 = 1000,
+ // 1'024 items per 1k
+ kIs1024 = 1024
+ };
+
+ double value;
+ Flags flags;
+ OneK oneK;
+
+ BENCHMARK_ALWAYS_INLINE
+ Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
+ : value(v), flags(f), oneK(k) {}
+
+ BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; }
+ BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
+};
+
+// A helper for user code to create unforeseen combinations of Flags, without
+// having to do this cast manually each time, or providing this operator.
+Counter::Flags inline operator|(const Counter::Flags& LHS,
+ const Counter::Flags& RHS) {
+ return static_cast<Counter::Flags>(static_cast<int>(LHS) |
+ static_cast<int>(RHS));
+}
+
+// This is the container for the user-defined counters.
+typedef std::map<std::string, Counter> UserCounters;
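+
+// Example of setting a user counter from inside a benchmark (a sketch;
+// DoSomeWork() is a hypothetical workload returning an item count):
+//
+//   static void BM_Work(benchmark::State& state) {
+//     size_t items = 0;
+//     for (auto _ : state) items += DoSomeWork();
+//     state.counters["Items"] = benchmark::Counter(
+//         static_cast<double>(items), benchmark::Counter::kIsRate);
+//   }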
+
+// TimeUnit is passed to a benchmark in order to specify the order of magnitude
+// for the measured time.
+enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
+
+// BigO is passed to a benchmark in order to specify the asymptotic
+// computational complexity for the benchmark. If oAuto is selected, the
+// complexity will be calculated automatically to the best fit.
+enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
+
+typedef uint64_t IterationCount;
+
+enum StatisticUnit { kTime, kPercentage };
+
+// BigOFunc is passed to a benchmark in order to specify the asymptotic
+// computational complexity for the benchmark.
+typedef double(BigOFunc)(IterationCount);
+
+// StatisticsFunc is passed to a benchmark in order to compute some descriptive
+// statistics over all the measurements of some type
+typedef double(StatisticsFunc)(const std::vector<double>&);
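+
+// Example (a sketch; BM_StringCompare and BM_Work are placeholder benchmark
+// functions). Request an automatic complexity fit:
+//
+//   BENCHMARK(BM_StringCompare)
+//       ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oAuto);
+//
+// and register a custom statistic computed over repeated runs:
+//
+//   double MyMax(const std::vector<double>& v) {
+//     return *std::max_element(v.begin(), v.end());
+//   }
+//   BENCHMARK(BM_Work)->Repetitions(10)->ComputeStatistics("max", MyMax);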
+
+namespace internal {
+struct Statistics {
+ std::string name_;
+ StatisticsFunc* compute_;
+ StatisticUnit unit_;
+
+ Statistics(const std::string& name, StatisticsFunc* compute,
+ StatisticUnit unit = kTime)
+ : name_(name), compute_(compute), unit_(unit) {}
+};
+
+class BenchmarkInstance;
+class ThreadTimer;
+class ThreadManager;
+class PerfCountersMeasurement;
+
+enum AggregationReportMode
+#if defined(BENCHMARK_HAS_CXX11)
+ : unsigned
+#else
+#endif
+{
+ // The mode has not been manually specified
+ ARM_Unspecified = 0,
+ // The mode is user-specified.
+ // This may or may not be set when the following bit-flags are set.
+ ARM_Default = 1U << 0U,
+ // File reporter should only output aggregates.
+ ARM_FileReportAggregatesOnly = 1U << 1U,
+ // Display reporter should only output aggregates
+ ARM_DisplayReportAggregatesOnly = 1U << 2U,
+ // Both reporters should only display aggregates.
+ ARM_ReportAggregatesOnly =
+ ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
+};
+
+} // namespace internal
+
+// State is passed to a running Benchmark and contains state for the
+// benchmark to use.
+class State {
+ public:
+ struct StateIterator;
+ friend struct StateIterator;
+
+ // Returns iterators used to run each iteration of a benchmark using a
+  // C++11 range-based for loop. These functions should not be called directly.
+ //
+ // REQUIRES: The benchmark has not started running yet. Neither begin nor end
+ // have been called previously.
+ //
+ // NOTE: KeepRunning may not be used after calling either of these functions.
+ BENCHMARK_ALWAYS_INLINE StateIterator begin();
+ BENCHMARK_ALWAYS_INLINE StateIterator end();
+
+ // Returns true if the benchmark should continue through another iteration.
+  // NOTE: A benchmark must not return from the test until KeepRunning() has
+ // returned false.
+ bool KeepRunning();
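+
+  // Legacy usage (prefer the C++11 range-based for loop where available):
+  //
+  //   while (state.KeepRunning()) {
+  //     // benchmark body
+  //   }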
+
+ // Returns true iff the benchmark should run n more iterations.
+ // REQUIRES: 'n' > 0.
+ // NOTE: A benchmark must not return from the test until KeepRunningBatch()
+ // has returned false.
+ // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
+ //
+ // Intended usage:
+ // while (state.KeepRunningBatch(1000)) {
+ // // process 1000 elements
+ // }
+ bool KeepRunningBatch(IterationCount n);
+
+ // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
+ // by the current thread.
+ // Stop the benchmark timer. If not called, the timer will be
+ // automatically stopped after the last iteration of the benchmark loop.
+ //
+ // For threaded benchmarks the PauseTiming() function only pauses the timing
+ // for the current thread.
+ //
+ // NOTE: The "real time" measurement is per-thread. If different threads
+ // report different measurements the largest one is reported.
+ //
+ // NOTE: PauseTiming()/ResumeTiming() are relatively
+ // heavyweight, and so their use should generally be avoided
+ // within each benchmark iteration, if possible.
+ void PauseTiming();
+
+ // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
+ // by the current thread.
+ // Start the benchmark timer. The timer is NOT running on entrance to the
+ // benchmark function. It begins running after control flow enters the
+ // benchmark loop.
+ //
+ // NOTE: PauseTiming()/ResumeTiming() are relatively
+ // heavyweight, and so their use should generally be avoided
+ // within each benchmark iteration, if possible.
+ void ResumeTiming();
+
+ // REQUIRES: 'SkipWithError(...)' has not been called previously by the
+ // current thread.
+ // Report the benchmark as resulting in an error with the specified 'msg'.
+ // After this call the user may explicitly 'return' from the benchmark.
+ //
+ // If the ranged-for style of benchmark loop is used, the user must explicitly
+ // break from the loop, otherwise all future iterations will be run.
+ // If the 'KeepRunning()' loop is used the current thread will automatically
+ // exit the loop at the end of the current iteration.
+ //
+ // For threaded benchmarks only the current thread stops executing and future
+ // calls to `KeepRunning()` will block until all threads have completed
+ // the `KeepRunning()` loop. If multiple threads report an error only the
+ // first error message is used.
+ //
+ // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
+ // the current scope immediately. If the function is called from within
+  // the 'KeepRunning()' loop the current iteration will finish. It is the
+  // user's responsibility to exit the scope as needed.
+ void SkipWithError(const char* msg);
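+
+  // Example (a sketch) with the range-based for loop, where an explicit break
+  // is required; ReadChunk() is a hypothetical fallible operation:
+  //
+  //   static void BM_Read(benchmark::State& state) {
+  //     for (auto _ : state) {
+  //       if (!ReadChunk()) {
+  //         state.SkipWithError("read failed");
+  //         break;  // the ranged-for loop does not stop on its own
+  //       }
+  //     }
+  //   }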
+
+ // Returns true if an error has been reported with 'SkipWithError(...)'.
+ bool error_occurred() const { return error_occurred_; }
+
+ // REQUIRES: called exactly once per iteration of the benchmarking loop.
+ // Set the manually measured time for this benchmark iteration, which
+ // is used instead of automatically measured time if UseManualTime() was
+ // specified.
+ //
+ // For threaded benchmarks the final value will be set to the largest
+ // reported values.
+ void SetIterationTime(double seconds);
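+
+  // Example (a sketch; pair it with '->UseManualTime()' at registration, and
+  // DoTimedWork() is a hypothetical externally timed operation):
+  //
+  //   static void BM_ManualTiming(benchmark::State& state) {
+  //     for (auto _ : state) {
+  //       auto start = std::chrono::high_resolution_clock::now();
+  //       DoTimedWork();
+  //       auto end = std::chrono::high_resolution_clock::now();
+  //       state.SetIterationTime(
+  //           std::chrono::duration<double>(end - start).count());
+  //     }
+  //   }
+  //   BENCHMARK(BM_ManualTiming)->UseManualTime();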
+
+ // Set the number of bytes processed by the current benchmark
+ // execution. This routine is typically called once at the end of a
+ // throughput oriented benchmark.
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ BENCHMARK_ALWAYS_INLINE
+ void SetBytesProcessed(int64_t bytes) {
+ counters["bytes_per_second"] =
+ Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t bytes_processed() const {
+ if (counters.find("bytes_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("bytes_per_second"));
+ return 0;
+ }
+
+  // If this routine is called with complexity_n > 0 and a complexity report is
+  // requested for the family of benchmarks, then the current benchmark will be
+  // part of the computation and complexity_n will represent the length of N.
+ BENCHMARK_ALWAYS_INLINE
+ void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t complexity_length_n() const { return complexity_n_; }
+
+ // If this routine is called with items > 0, then an items/s
+ // label is printed on the benchmark report line for the currently
+ // executing benchmark. It is typically called at the end of a processing
+ // benchmark where a processing items/second output is desired.
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ BENCHMARK_ALWAYS_INLINE
+ void SetItemsProcessed(int64_t items) {
+ counters["items_per_second"] =
+ Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ int64_t items_processed() const {
+ if (counters.find("items_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("items_per_second"));
+ return 0;
+ }
+
+ // If this routine is called, the specified label is printed at the
+ // end of the benchmark report line for the currently executing
+ // benchmark. Example:
+ // static void BM_Compress(benchmark::State& state) {
+ // ...
+  //     double compression = input_size / output_size;
+ // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
+ // }
+ // Produces output that looks like:
+ // BM_Compress 50 50 14115038 compress:27.3%
+ //
+ // REQUIRES: a benchmark has exited its benchmarking loop.
+ void SetLabel(const char* label);
+
+ void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
+ this->SetLabel(str.c_str());
+ }
+
+ // Range arguments for this run. CHECKs if the argument has been set.
+ BENCHMARK_ALWAYS_INLINE
+ int64_t range(std::size_t pos = 0) const {
+ assert(range_.size() > pos);
+ return range_[pos];
+ }
+
+ BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
+ int64_t range_x() const { return range(0); }
+
+ BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
+ int64_t range_y() const { return range(1); }
+
+ // Number of threads concurrently executing the benchmark.
+ BENCHMARK_ALWAYS_INLINE
+ int threads() const { return threads_; }
+
+ // Index of the executing thread. Values from [0, threads).
+ BENCHMARK_ALWAYS_INLINE
+ int thread_index() const { return thread_index_; }
+
+ BENCHMARK_ALWAYS_INLINE
+ IterationCount iterations() const {
+ if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
+ return 0;
+ }
+ return max_iterations - total_iterations_ + batch_leftover_;
+ }
+
+ private:
+  // items we expect on the first cache line (i.e. 64 bytes of the struct)
+ // When total_iterations_ is 0, KeepRunning() and friends will return false.
+ // May be larger than max_iterations.
+ IterationCount total_iterations_;
+
+ // When using KeepRunningBatch(), batch_leftover_ holds the number of
+ // iterations beyond max_iters that were run. Used to track
+ // completed_iterations_ accurately.
+ IterationCount batch_leftover_;
+
+ public:
+ const IterationCount max_iterations;
+
+ private:
+ bool started_;
+ bool finished_;
+ bool error_occurred_;
+
+ // items we don't need on the first cache line
+ std::vector<int64_t> range_;
+
+ int64_t complexity_n_;
+
+ public:
+ // Container for user-defined counters.
+ UserCounters counters;
+
+ private:
+ State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager,
+ internal::PerfCountersMeasurement* perf_counters_measurement);
+
+ void StartKeepRunning();
+ // Implementation of KeepRunning() and KeepRunningBatch().
+ // is_batch must be true unless n is 1.
+ bool KeepRunningInternal(IterationCount n, bool is_batch);
+ void FinishKeepRunning();
+
+ const int thread_index_;
+ const int threads_;
+
+ internal::ThreadTimer* const timer_;
+ internal::ThreadManager* const manager_;
+ internal::PerfCountersMeasurement* const perf_counters_measurement_;
+
+ friend class internal::BenchmarkInstance;
+};
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
+ return KeepRunningInternal(1, /*is_batch=*/false);
+}
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
+ return KeepRunningInternal(n, /*is_batch=*/true);
+}
+
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
+ bool is_batch) {
+ // total_iterations_ is set to 0 by the constructor, and always set to a
+  // nonzero value by StartKeepRunning().
+ assert(n > 0);
+ // n must be 1 unless is_batch is true.
+ assert(is_batch || n == 1);
+ if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
+ total_iterations_ -= n;
+ return true;
+ }
+ if (!started_) {
+ StartKeepRunning();
+ if (!error_occurred_ && total_iterations_ >= n) {
+ total_iterations_ -= n;
+ return true;
+ }
+ }
+ // For non-batch runs, total_iterations_ must be 0 by now.
+ if (is_batch && total_iterations_ != 0) {
+ batch_leftover_ = n - total_iterations_;
+ total_iterations_ = 0;
+ return true;
+ }
+ FinishKeepRunning();
+ return false;
+}
+
+struct State::StateIterator {
+ struct BENCHMARK_UNUSED Value {};
+ typedef std::forward_iterator_tag iterator_category;
+ typedef Value value_type;
+ typedef Value reference;
+ typedef Value pointer;
+ typedef std::ptrdiff_t difference_type;
+
+ private:
+ friend class State;
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator() : cached_(0), parent_() {}
+
+ BENCHMARK_ALWAYS_INLINE
+ explicit StateIterator(State* st)
+ : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
+
+ public:
+ BENCHMARK_ALWAYS_INLINE
+ Value operator*() const { return Value(); }
+
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator& operator++() {
+ assert(cached_ > 0);
+ --cached_;
+ return *this;
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ bool operator!=(StateIterator const&) const {
+ if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
+ parent_->FinishKeepRunning();
+ return false;
+ }
+
+ private:
+ IterationCount cached_;
+ State* const parent_;
+};
+
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
+ return StateIterator(this);
+}
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
+ StartKeepRunning();
+ return StateIterator();
+}
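+
+// A minimal sketch of the loop the iterator above enables (BM_Example is a
+// hypothetical user function, not part of this header):
+//
+//   static void BM_Example(benchmark::State& state) {
+//     for (auto _ : state) {
+//       // code to benchmark
+//     }
+//   }
+//
+// The range-for loop compiles down to the same fast path as an explicit
+// while (state.KeepRunning()) loop; operator!= above calls
+// FinishKeepRunning() once the cached iteration count reaches zero.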
+
+namespace internal {
+
+typedef void(Function)(State&);
+
+// ------------------------------------------------------
+// Benchmark registration object. The BENCHMARK() macro expands
+// into an internal::Benchmark* object. Various methods can
+// be called on this object to change the properties of the benchmark.
+// Each method returns "this" so that multiple method calls can
+// be chained into one expression.
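+//
+// For example, a hypothetical chain of the configuration methods declared
+// below (BM_memcpy is an assumed user benchmark function):
+//   BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Unit(benchmark::kMicrosecond);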
+class Benchmark {
+ public:
+ virtual ~Benchmark();
+
+ // Note: the following methods all return "this" so that multiple
+ // method calls can be chained together in one expression.
+
+ // Specify the name of the benchmark
+ Benchmark* Name(const std::string& name);
+
+ // Run this benchmark once with "x" as the extra argument passed
+ // to the function.
+ // REQUIRES: The function passed to the constructor must accept an arg1.
+ Benchmark* Arg(int64_t x);
+
+ // Run this benchmark with the given time unit for the generated output report
+ Benchmark* Unit(TimeUnit unit);
+
+ // Run this benchmark once for a number of values picked from the
+ // range [start..limit]. (start and limit are always picked.)
+ // REQUIRES: The function passed to the constructor must accept an arg1.
+ Benchmark* Range(int64_t start, int64_t limit);
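+  //
+  // For example, with the default range multiplier (kRangeMultiplier, 8 at
+  // the time of writing), BENCHMARK(BM_Foo)->Range(1, 1 << 10) runs BM_Foo
+  // with arguments 1, 8, 64, 512, and 1024 (an illustrative sketch).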
+
+ // Run this benchmark once for all values in the range [start..limit] with
+ // specific step
+ // REQUIRES: The function passed to the constructor must accept an arg1.
+ Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
+
+ // Run this benchmark once with "args" as the extra arguments passed
+ // to the function.
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* Args(const std::vector<int64_t>& args);
+
+ // Equivalent to Args({x, y})
+ // NOTE: This is a legacy C++03 interface provided for compatibility only.
+ // New code should use 'Args'.
+ Benchmark* ArgPair(int64_t x, int64_t y) {
+ std::vector<int64_t> args;
+ args.push_back(x);
+ args.push_back(y);
+ return Args(args);
+ }
+
+ // Run this benchmark once for a number of values picked from the
+ // ranges [start..limit]. (starts and limits are always picked.)
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
+
+ // Run this benchmark once for each combination of values in the (cartesian)
+ // product of the supplied argument lists.
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
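+  //
+  // For example (illustrative), ->ArgsProduct({{1, 2}, {8, 16}}) runs the
+  // benchmark with the argument pairs {1,8}, {1,16}, {2,8}, and {2,16}.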
+
+ // Equivalent to ArgNames({name})
+ Benchmark* ArgName(const std::string& name);
+
+ // Set the argument names to display in the benchmark name. If not called,
+ // only argument values will be shown.
+ Benchmark* ArgNames(const std::vector<std::string>& names);
+
+ // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
+ // NOTE: This is a legacy C++03 interface provided for compatibility only.
+ // New code should use 'Ranges'.
+ Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
+ std::vector<std::pair<int64_t, int64_t> > ranges;
+ ranges.push_back(std::make_pair(lo1, hi1));
+ ranges.push_back(std::make_pair(lo2, hi2));
+ return Ranges(ranges);
+ }
+
+ // Have "setup" and/or "teardown" invoked once for every benchmark run.
+ // If the benchmark is multi-threaded (will run in k threads concurrently),
+  // the setup callback will be invoked exactly once (not k times) before
+ // each run with k threads. Time allowing (e.g. for a short benchmark), there
+ // may be multiple such runs per benchmark, each run with its own
+ // "setup"/"teardown".
+ //
+ // If the benchmark uses different size groups of threads (e.g. via
+ // ThreadRange), the above will be true for each size group.
+ //
+ // The callback will be passed a State object, which includes the number
+ // of threads, thread-index, benchmark arguments, etc.
+ //
+ // The callback must not be NULL or self-deleting.
+ Benchmark* Setup(void (*setup)(const benchmark::State&));
+ Benchmark* Teardown(void (*teardown)(const benchmark::State&));
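+  //
+  // For example (DoSetup and DoTeardown are hypothetical user callbacks
+  // taking a const benchmark::State&):
+  //   BENCHMARK(BM_Foo)->Setup(DoSetup)->Teardown(DoTeardown);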
+
+ // Pass this benchmark object to *func, which can customize
+ // the benchmark by calling various methods like Arg, Args,
+ // Threads, etc.
+ Benchmark* Apply(void (*func)(Benchmark* benchmark));
+
+  // Set the range multiplier for non-dense ranges. If not called, the range
+ // multiplier kRangeMultiplier will be used.
+ Benchmark* RangeMultiplier(int multiplier);
+
+ // Set the minimum amount of time to use when running this benchmark. This
+ // option overrides the `benchmark_min_time` flag.
+ // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
+ Benchmark* MinTime(double t);
+
+  // Specify the number of iterations that should be run by this benchmark.
+ // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
+ //
+ // NOTE: This function should only be used when *exact* iteration control is
+ // needed and never to control or limit how long a benchmark runs, where
+ // `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
+ Benchmark* Iterations(IterationCount n);
+
+  // Specify the number of times to repeat this benchmark. This option overrides
+ // the `benchmark_repetitions` flag.
+ // REQUIRES: `n > 0`
+ Benchmark* Repetitions(int n);
+
+ // Specify if each repetition of the benchmark should be reported separately
+ // or if only the final statistics should be reported. If the benchmark
+ // is not repeated then the single result is always reported.
+ // Applies to *ALL* reporters (display and file).
+ Benchmark* ReportAggregatesOnly(bool value = true);
+
+ // Same as ReportAggregatesOnly(), but applies to display reporter only.
+ Benchmark* DisplayAggregatesOnly(bool value = true);
+
+  // By default, the CPU time is measured only for the main thread, which may
+  // be unrepresentative if the benchmark uses threads internally. If called,
+  // the total CPU time spent by all the threads will be measured instead.
+ Benchmark* MeasureProcessCPUTime();
+
+ // If a particular benchmark should use the Wall clock instead of the CPU time
+ // (be it either the CPU time of the main thread only (default), or the
+ // total CPU usage of the benchmark), call this method. If called, the elapsed
+ // (wall) time will be used to control how many iterations are run, and in the
+ // printing of items/second or MB/seconds values.
+ // If not called, the CPU time used by the benchmark will be used.
+ Benchmark* UseRealTime();
+
+  // If a benchmark must measure time manually (e.g. if GPU execution time is
+  // being measured), call this method. If called, each benchmark iteration
+  // should call SetIterationTime(seconds) to report the measured time, which
+  // will be used to control how many iterations are run, and in the printing
+  // of items/second or MB/second values.
+ Benchmark* UseManualTime();
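+  //
+  // An illustrative manual-timing loop (LaunchKernelAndWaitSeconds is an
+  // assumed user helper returning elapsed seconds):
+  //   static void BM_Gpu(benchmark::State& state) {
+  //     for (auto _ : state)
+  //       state.SetIterationTime(LaunchKernelAndWaitSeconds());
+  //   }
+  //   BENCHMARK(BM_Gpu)->UseManualTime();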
+
+ // Set the asymptotic computational complexity for the benchmark. If called
+ // the asymptotic computational complexity will be shown on the output.
+ Benchmark* Complexity(BigO complexity = benchmark::oAuto);
+
+ // Set the asymptotic computational complexity for the benchmark. If called
+ // the asymptotic computational complexity will be shown on the output.
+ Benchmark* Complexity(BigOFunc* complexity);
+
+  // Add this statistic to be computed over all the values of the benchmark run.
+ Benchmark* ComputeStatistics(const std::string& name,
+ StatisticsFunc* statistics,
+ StatisticUnit unit = kTime);
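+  //
+  // For example, a sketch registering a "max" statistic (b is the Benchmark*
+  // returned by a BENCHMARK() macro; the lambda matches StatisticsFunc):
+  //   b->ComputeStatistics("max", [](const std::vector<double>& v) {
+  //     return *std::max_element(v.begin(), v.end());
+  //   });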
+
+ // Support for running multiple copies of the same benchmark concurrently
+ // in multiple threads. This may be useful when measuring the scaling
+ // of some piece of code.
+
+ // Run one instance of this benchmark concurrently in t threads.
+ Benchmark* Threads(int t);
+
+ // Pick a set of values T from [min_threads,max_threads].
+ // min_threads and max_threads are always included in T. Run this
+ // benchmark once for each value in T. The benchmark run for a
+ // particular value t consists of t threads running the benchmark
+ // function concurrently. For example, consider:
+ // BENCHMARK(Foo)->ThreadRange(1,16);
+ // This will run the following benchmarks:
+ // Foo in 1 thread
+ // Foo in 2 threads
+ // Foo in 4 threads
+ // Foo in 8 threads
+ // Foo in 16 threads
+ Benchmark* ThreadRange(int min_threads, int max_threads);
+
+ // For each value n in the range, run this benchmark once using n threads.
+ // min_threads and max_threads are always included in the range.
+ // stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts
+ // a benchmark with 1, 4, 7 and 8 threads.
+ Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1);
+
+ // Equivalent to ThreadRange(NumCPUs(), NumCPUs())
+ Benchmark* ThreadPerCpu();
+
+ virtual void Run(State& state) = 0;
+
+ protected:
+ explicit Benchmark(const char* name);
+ Benchmark(Benchmark const&);
+ void SetName(const char* name);
+
+ int ArgsCnt() const;
+
+ private:
+ friend class BenchmarkFamilies;
+ friend class BenchmarkInstance;
+
+ std::string name_;
+ AggregationReportMode aggregation_report_mode_;
+ std::vector<std::string> arg_names_; // Args for all benchmark runs
+ std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
+ TimeUnit time_unit_;
+ int range_multiplier_;
+ double min_time_;
+ IterationCount iterations_;
+ int repetitions_;
+ bool measure_process_cpu_time_;
+ bool use_real_time_;
+ bool use_manual_time_;
+ BigO complexity_;
+ BigOFunc* complexity_lambda_;
+ std::vector<Statistics> statistics_;
+ std::vector<int> thread_counts_;
+
+ typedef void (*callback_function)(const benchmark::State&);
+ callback_function setup_;
+ callback_function teardown_;
+
+ Benchmark& operator=(Benchmark const&);
+};
+
+} // namespace internal
+
+// Create and register a benchmark with the specified 'name' that invokes
+// the specified functor 'fn'.
+//
+// RETURNS: A pointer to the registered benchmark.
+internal::Benchmark* RegisterBenchmark(const char* name,
+ internal::Function* fn);
+
+#if defined(BENCHMARK_HAS_CXX11)
+template <class Lambda>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn);
+#endif
+
+// Remove all registered benchmarks. All pointers to previously registered
+// benchmarks are invalidated.
+void ClearRegisteredBenchmarks();
+
+namespace internal {
+// The class used to hold all Benchmarks created from static functions
+// (i.e. those created using the BENCHMARK(...) macros).
+class FunctionBenchmark : public Benchmark {
+ public:
+ FunctionBenchmark(const char* name, Function* func)
+ : Benchmark(name), func_(func) {}
+
+ virtual void Run(State& st) BENCHMARK_OVERRIDE;
+
+ private:
+ Function* func_;
+};
+
+#ifdef BENCHMARK_HAS_CXX11
+template <class Lambda>
+class LambdaBenchmark : public Benchmark {
+ public:
+ virtual void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); }
+
+ private:
+ template <class OLambda>
+ LambdaBenchmark(const char* name, OLambda&& lam)
+ : Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}
+
+ LambdaBenchmark(LambdaBenchmark const&) = delete;
+
+ template <class Lam> // NOLINTNEXTLINE(readability-redundant-declaration)
+ friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&);
+
+ Lambda lambda_;
+};
+#endif
+
+} // namespace internal
+
+inline internal::Benchmark* RegisterBenchmark(const char* name,
+ internal::Function* fn) {
+ return internal::RegisterBenchmarkInternal(
+ ::new internal::FunctionBenchmark(name, fn));
+}
+
+#ifdef BENCHMARK_HAS_CXX11
+template <class Lambda>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) {
+ using BenchType =
+ internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
+ return internal::RegisterBenchmarkInternal(
+ ::new BenchType(name, std::forward<Lambda>(fn)));
+}
+#endif
+
+#if defined(BENCHMARK_HAS_CXX11) && \
+ (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
+template <class Lambda, class... Args>
+internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn,
+ Args&&... args) {
+ return benchmark::RegisterBenchmark(
+ name, [=](benchmark::State& st) { fn(st, args...); });
+}
+#else
+#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
+#endif
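+
+// For example, a minimal lambda registration under the C++11 overloads above
+// (a sketch; "noop" is an arbitrary benchmark name):
+//   benchmark::RegisterBenchmark("noop", [](benchmark::State& st) {
+//     for (auto _ : st) {
+//     }
+//   });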
+
+// The base class for all fixture tests.
+class Fixture : public internal::Benchmark {
+ public:
+ Fixture() : internal::Benchmark("") {}
+
+ virtual void Run(State& st) BENCHMARK_OVERRIDE {
+ this->SetUp(st);
+ this->BenchmarkCase(st);
+ this->TearDown(st);
+ }
+
+ // These will be deprecated ...
+ virtual void SetUp(const State&) {}
+ virtual void TearDown(const State&) {}
+ // ... In favor of these.
+ virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); }
+ virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); }
+
+ protected:
+ virtual void BenchmarkCase(State&) = 0;
+};
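+
+// A sketch of fixture usage via the BENCHMARK_F macro defined further below
+// (MyFixture is a hypothetical user type):
+//   class MyFixture : public benchmark::Fixture {
+//    public:
+//     void SetUp(::benchmark::State&) BENCHMARK_OVERRIDE { /* acquire */ }
+//     void TearDown(::benchmark::State&) BENCHMARK_OVERRIDE { /* release */ }
+//   };
+//   BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+//     for (auto _ : st) { /* code under test */ }
+//   }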
+
+} // namespace benchmark
+
+// ------------------------------------------------------
+// Macro to register benchmarks
+
+// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
+// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
+// empty. If X is empty the expression becomes (+1 == +0).
+#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
+#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
+#else
+#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
+#endif
+
+// Helpers for generating unique variable names
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_PRIVATE_NAME(...) \
+ BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, \
+ __VA_ARGS__)
+#else
+#define BENCHMARK_PRIVATE_NAME(n) \
+ BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
+#endif // BENCHMARK_HAS_CXX11
+
+#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
+#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
+// Helper for concatenation with macro name expansion
+#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \
+ BaseClass##_##Method##_Benchmark
+
+#define BENCHMARK_PRIVATE_DECLARE(n) \
+ static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
+ BENCHMARK_UNUSED
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK(...) \
+ BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \
+ &__VA_ARGS__)))
+#else
+#define BENCHMARK(n) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n, n)))
+#endif // BENCHMARK_HAS_CXX11
+
+// Old-style macros
+#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
+#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
+#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
+#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
+#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
+ BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
+
+#ifdef BENCHMARK_HAS_CXX11
+
+// Register a benchmark which invokes the function specified by `func`
+// with the additional arguments specified by `...`.
+//
+// For example:
+//
+// template <class ...ExtraArgs>
+// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+//   [...]
+// }
+// /* Registers a benchmark named "BM_takes_args/int_string_test" */
+// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+#define BENCHMARK_CAPTURE(func, test_case_name, ...) \
+ BENCHMARK_PRIVATE_DECLARE(func) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark( \
+ #func "/" #test_case_name, \
+ [](::benchmark::State& st) { func(st, __VA_ARGS__); })))
+
+#endif // BENCHMARK_HAS_CXX11
+
+// This will register a benchmark for a templatized function. For example:
+//
+// template<int arg>
+// void BM_Foo(benchmark::State& state);
+//
+// BENCHMARK_TEMPLATE(BM_Foo, 1);
+//
+// will register BM_Foo<1> as a benchmark.
+#define BENCHMARK_TEMPLATE1(n, a) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n<a>)))
+
+#define BENCHMARK_TEMPLATE2(n, a, b) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
+ n<a, b>)))
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE(n, ...) \
+ BENCHMARK_PRIVATE_DECLARE(n) = \
+ (::benchmark::internal::RegisterBenchmarkInternal( \
+ new ::benchmark::internal::FunctionBenchmark( \
+ #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>)))
+#else
+#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
+#endif
+
+#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass { \
+ public: \
+ BaseClass##_##Method##_Benchmark() { \
+ this->SetName(#BaseClass "/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+ };
+
+#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() { \
+ this->SetName(#BaseClass "<" #a ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+ };
+
+#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() { \
+ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+ };
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() { \
+ this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+ };
+#else
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
+#endif
+
+#define BENCHMARK_DEFINE_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
+#endif
+
+#define BENCHMARK_REGISTER_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method))
+
+#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
+ BENCHMARK_PRIVATE_DECLARE(TestName) = \
+ (::benchmark::internal::RegisterBenchmarkInternal(new TestName()))
+
+// This macro will define and register a benchmark within a fixture class.
+#define BENCHMARK_F(BaseClass, Method) \
+ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
+#endif
+
+// Helper macro to create a main routine in a test that runs the benchmarks
+#define BENCHMARK_MAIN() \
+ int main(int argc, char** argv) { \
+ ::benchmark::Initialize(&argc, argv); \
+ if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
+ ::benchmark::RunSpecifiedBenchmarks(); \
+ ::benchmark::Shutdown(); \
+ return 0; \
+ } \
+ int main(int, char**)
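+
+// Typical usage in a standalone benchmark binary (BM_SomeFunction is an
+// assumed user-defined benchmark):
+//   BENCHMARK(BM_SomeFunction);
+//   BENCHMARK_MAIN();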
+
+// ------------------------------------------------------
+// Benchmark Reporters
+
+namespace benchmark {
+
+struct CPUInfo {
+ struct CacheInfo {
+ std::string type;
+ int level;
+ int size;
+ int num_sharing;
+ };
+
+ enum Scaling { UNKNOWN, ENABLED, DISABLED };
+
+ int num_cpus;
+ Scaling scaling;
+ double cycles_per_second;
+ std::vector<CacheInfo> caches;
+ std::vector<double> load_avg;
+
+ static const CPUInfo& Get();
+
+ private:
+ CPUInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
+};
+
+// Struct holding system information (currently just the host name).
+struct SystemInfo {
+ std::string name;
+ static const SystemInfo& Get();
+
+ private:
+ SystemInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
+};
+
+// BenchmarkName contains the components of the Benchmark's name
+// which allows individual fields to be modified or cleared before
+// building the final name using 'str()'.
+struct BenchmarkName {
+ std::string function_name;
+ std::string args;
+ std::string min_time;
+ std::string iterations;
+ std::string repetitions;
+ std::string time_type;
+ std::string threads;
+
+ // Return the full name of the benchmark with each non-empty
+ // field separated by a '/'
+ std::string str() const;
+};
+
+// Interface for custom benchmark result printers.
+// By default, benchmark reports are printed to stdout. However, an application
+// can control the destination of the reports by calling
+// RunSpecifiedBenchmarks and passing it a custom reporter object.
+// The reporter object must implement the following interface.
+class BenchmarkReporter {
+ public:
+ struct Context {
+ CPUInfo const& cpu_info;
+ SystemInfo const& sys_info;
+ // The number of chars in the longest benchmark name.
+ size_t name_field_width;
+ static const char* executable_name;
+ Context();
+ };
+
+ struct Run {
+ static const int64_t no_repetition_index = -1;
+ enum RunType { RT_Iteration, RT_Aggregate };
+
+ Run()
+ : run_type(RT_Iteration),
+ aggregate_unit(kTime),
+ error_occurred(false),
+ iterations(1),
+ threads(1),
+ time_unit(kNanosecond),
+ real_accumulated_time(0),
+ cpu_accumulated_time(0),
+ max_heapbytes_used(0),
+ complexity(oNone),
+ complexity_lambda(),
+ complexity_n(0),
+ report_big_o(false),
+ report_rms(false),
+ memory_result(NULL),
+ allocs_per_iter(0.0) {}
+
+ std::string benchmark_name() const;
+ BenchmarkName run_name;
+ int64_t family_index;
+ int64_t per_family_instance_index;
+ RunType run_type;
+ std::string aggregate_name;
+ StatisticUnit aggregate_unit;
+ std::string report_label; // Empty if not set by benchmark.
+ bool error_occurred;
+ std::string error_message;
+
+ // Total iterations across all threads.
+ IterationCount iterations;
+
+ int64_t threads;
+ int64_t repetition_index;
+ int64_t repetitions;
+ TimeUnit time_unit;
+
+ // Total time across all threads.
+ double real_accumulated_time;
+ double cpu_accumulated_time;
+
+ // Return a value representing the real time per iteration in the unit
+ // specified by 'time_unit'.
+ // NOTE: If 'iterations' is zero the returned value represents the
+ // accumulated time.
+ double GetAdjustedRealTime() const;
+
+ // Return a value representing the cpu time per iteration in the unit
+ // specified by 'time_unit'.
+ // NOTE: If 'iterations' is zero the returned value represents the
+ // accumulated time.
+ double GetAdjustedCPUTime() const;
+
+ // This is set to 0.0 if memory tracing is not enabled.
+ double max_heapbytes_used;
+
+ // Keep track of arguments to compute asymptotic complexity
+ BigO complexity;
+ BigOFunc* complexity_lambda;
+ int64_t complexity_n;
+
+ // what statistics to compute from the measurements
+ const std::vector<internal::Statistics>* statistics;
+
+ // Inform print function whether the current run is a complexity report
+ bool report_big_o;
+ bool report_rms;
+
+ UserCounters counters;
+
+ // Memory metrics.
+ const MemoryManager::Result* memory_result;
+ double allocs_per_iter;
+ };
+
+ struct PerFamilyRunReports {
+ PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {}
+
+ // How many runs will all instances of this benchmark perform?
+ int num_runs_total;
+
+ // How many runs have happened already?
+ int num_runs_done;
+
+    // The reports about (non-erroneous!) runs of this family.
+ std::vector<BenchmarkReporter::Run> Runs;
+ };
+
+ // Construct a BenchmarkReporter with the output stream set to 'std::cout'
+ // and the error stream set to 'std::cerr'
+ BenchmarkReporter();
+
+ // Called once for every suite of benchmarks run.
+ // The parameter "context" contains information that the
+ // reporter may wish to use when generating its report, for example the
+ // platform under which the benchmarks are running. The benchmark run is
+ // never started if this function returns false, allowing the reporter
+ // to skip runs based on the context information.
+ virtual bool ReportContext(const Context& context) = 0;
+
+ // Called once for each group of benchmark runs, gives information about
+ // cpu-time and heap memory usage during the benchmark run. If the group
+ // of runs contained more than two entries then 'report' contains additional
+ // elements representing the mean and standard deviation of those runs.
+ // Additionally if this group of runs was the last in a family of benchmarks
+ // 'reports' contains additional entries representing the asymptotic
+ // complexity and RMS of that benchmark family.
+ virtual void ReportRuns(const std::vector<Run>& report) = 0;
+
+  // Called once and only once after every group of benchmarks is run and
+ // reported.
+ virtual void Finalize() {}
+
+ // REQUIRES: The object referenced by 'out' is valid for the lifetime
+ // of the reporter.
+ void SetOutputStream(std::ostream* out) {
+ assert(out);
+ output_stream_ = out;
+ }
+
+ // REQUIRES: The object referenced by 'err' is valid for the lifetime
+ // of the reporter.
+ void SetErrorStream(std::ostream* err) {
+ assert(err);
+ error_stream_ = err;
+ }
+
+ std::ostream& GetOutputStream() const { return *output_stream_; }
+
+ std::ostream& GetErrorStream() const { return *error_stream_; }
+
+ virtual ~BenchmarkReporter();
+
+ // Write a human readable string to 'out' representing the specified
+ // 'context'.
+ // REQUIRES: 'out' is non-null.
+ static void PrintBasicContext(std::ostream* out, Context const& context);
+
+ private:
+ std::ostream* output_stream_;
+ std::ostream* error_stream_;
+};
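+
+// A minimal custom reporter sketch against the interface above (NullReporter
+// is a hypothetical name; it accepts every context and discards all runs):
+//   class NullReporter : public BenchmarkReporter {
+//    public:
+//     bool ReportContext(const Context&) BENCHMARK_OVERRIDE { return true; }
+//     void ReportRuns(const std::vector<Run>&) BENCHMARK_OVERRIDE {}
+//   };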
+
+// Simple reporter that outputs benchmark data to the console. This is the
+// default reporter used by RunSpecifiedBenchmarks().
+class ConsoleReporter : public BenchmarkReporter {
+ public:
+ enum OutputOptions {
+ OO_None = 0,
+ OO_Color = 1,
+ OO_Tabular = 2,
+ OO_ColorTabular = OO_Color | OO_Tabular,
+ OO_Defaults = OO_ColorTabular
+ };
+ explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
+ : output_options_(opts_), name_field_width_(0), printed_header_(false) {}
+
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
+ virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
+
+ protected:
+ virtual void PrintRunData(const Run& report);
+ virtual void PrintHeader(const Run& report);
+
+ OutputOptions output_options_;
+ size_t name_field_width_;
+ UserCounters prev_counters_;
+ bool printed_header_;
+};
+
+class JSONReporter : public BenchmarkReporter {
+ public:
+ JSONReporter() : first_report_(true) {}
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
+ virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
+ virtual void Finalize() BENCHMARK_OVERRIDE;
+
+ private:
+ void PrintRunData(const Run& report);
+
+ bool first_report_;
+};
+
+class BENCHMARK_DEPRECATED_MSG(
+ "The CSV Reporter will be removed in a future release") CSVReporter
+ : public BenchmarkReporter {
+ public:
+ CSVReporter() : printed_header_(false) {}
+ virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
+ virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
+
+ private:
+ void PrintRunData(const Run& report);
+
+ bool printed_header_;
+ std::set<std::string> user_counter_names_;
+};
+
+inline const char* GetTimeUnitString(TimeUnit unit) {
+ switch (unit) {
+ case kSecond:
+ return "s";
+ case kMillisecond:
+ return "ms";
+ case kMicrosecond:
+ return "us";
+ case kNanosecond:
+ return "ns";
+ }
+ BENCHMARK_UNREACHABLE();
+}
+
+inline double GetTimeUnitMultiplier(TimeUnit unit) {
+ switch (unit) {
+ case kSecond:
+ return 1;
+ case kMillisecond:
+ return 1e3;
+ case kMicrosecond:
+ return 1e6;
+ case kNanosecond:
+ return 1e9;
+ }
+ BENCHMARK_UNREACHABLE();
+}
+
+// Creates a list of integer values for the given range and multiplier.
+// This can be used together with ArgsProduct() to allow multiple ranges
+// with different multipliers.
+// Example:
+// ArgsProduct({
+// CreateRange(0, 1024, /*multi=*/32),
+// CreateRange(0, 100, /*multi=*/4),
+// CreateDenseRange(0, 4, /*step=*/1),
+// });
+std::vector<int64_t> CreateRange(int64_t lo, int64_t hi, int multi);
+
+// Creates a list of integer values for the given range and step.
+std::vector<int64_t> CreateDenseRange(int64_t start, int64_t limit, int step);
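+//
+// For example, CreateRange(1, 8, /*multi=*/2) yields {1, 2, 4, 8} and
+// CreateDenseRange(0, 4, /*step=*/1) yields {0, 1, 2, 3, 4}.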
+
+} // namespace benchmark
+
+#endif // BENCHMARK_BENCHMARK_H_
diff --git a/contrib/libs/benchmark/src/arraysize.h b/contrib/libs/benchmark/src/arraysize.h
new file mode 100644
index 00000000000..51a50f2dff2
--- /dev/null
+++ b/contrib/libs/benchmark/src/arraysize.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_ARRAYSIZE_H_
+#define BENCHMARK_ARRAYSIZE_H_
+
+#include "internal_macros.h"
+
+namespace benchmark {
+namespace internal {
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef COMPILER_MSVC
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
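+
+// For example (checked at compile time):
+//   int a[10];
+//   static_assert(arraysize(a) == 10, "");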
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_ARRAYSIZE_H_
diff --git a/contrib/libs/benchmark/src/benchmark.cc b/contrib/libs/benchmark/src/benchmark.cc
new file mode 100644
index 00000000000..cedeee31c79
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark.cc
@@ -0,0 +1,626 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+#include "benchmark_api_internal.h"
+#include "benchmark_runner.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <memory>
+#include <random>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "perf_counters.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+namespace benchmark {
+// Print a list of benchmarks. This option overrides all other options.
+BM_DEFINE_bool(benchmark_list_tests, false);
+
+// A regular expression that specifies the set of benchmarks to execute. If
+// this flag is empty, or if this flag is the string \"all\", all benchmarks
+// linked into the binary are run.
+BM_DEFINE_string(benchmark_filter, "");
+
+// Minimum number of seconds we should run benchmark before results are
+// considered significant. For cpu-time based tests, this is the lower bound
+// on the total cpu time used by all threads that make up the test. For
+// real-time based tests, this is the lower bound on the elapsed time of the
+// benchmark execution, regardless of number of threads.
+BM_DEFINE_double(benchmark_min_time, 0.5);
+
+// The number of runs of each benchmark. If greater than 1, the mean and
+// standard deviation of the runs will be reported.
+BM_DEFINE_int32(benchmark_repetitions, 1);
+
+// If set, enable random interleaving of repetitions of all benchmarks.
+// See http://github.com/google/benchmark/issues/1051 for details.
+BM_DEFINE_bool(benchmark_enable_random_interleaving, false);
+
+// Report the result of each benchmark repetition. When 'true' is specified,
+// only the mean, standard deviation, and other statistics are reported for
+// repeated benchmarks. Affects all reporters.
+BM_DEFINE_bool(benchmark_report_aggregates_only, false);
+
+// Display the result of each benchmark repetition. When 'true' is specified,
+// only the mean, standard deviation, and other statistics are displayed for
+// repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
+// affects the display reporter, but *NOT* the file reporter, which will still
+// contain all the output.
+BM_DEFINE_bool(benchmark_display_aggregates_only, false);
+
+// The format to use for console output.
+// Valid values are 'console', 'json', or 'csv'.
+BM_DEFINE_string(benchmark_format, "console");
+
+// The format to use for file output.
+// Valid values are 'console', 'json', or 'csv'.
+BM_DEFINE_string(benchmark_out_format, "json");
+
+// The file to write additional output to.
+BM_DEFINE_string(benchmark_out, "");
+
+// Whether to use colors in the output. Valid values:
+// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
+// the output is being sent to a terminal and the TERM environment variable is
+// set to a terminal type that supports colors.
+BM_DEFINE_string(benchmark_color, "auto");
+
+// Whether to use tabular format when printing user counters to the console.
+// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
+BM_DEFINE_bool(benchmark_counters_tabular, false);
+
+// List of additional perf counters to collect, in libpfm format. For more
+// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
+BM_DEFINE_string(benchmark_perf_counters, "");
+
+// Extra context to include in the output, formatted as comma-separated
+// key-value pairs. Kept internal as it's only used for parsing from the
+// environment or the command line.
+BM_DEFINE_kvpairs(benchmark_context, {});
+
+// The level of verbose logging to output
+BM_DEFINE_int32(v, 0);
+
+namespace internal {
+
+std::map<std::string, std::string>* global_context = nullptr;
+
+// FIXME: wouldn't LTO mess this up?
+void UseCharPointer(char const volatile*) {}
+
+} // namespace internal
+
+State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
+ int thread_i, int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager,
+ internal::PerfCountersMeasurement* perf_counters_measurement)
+ : total_iterations_(0),
+ batch_leftover_(0),
+ max_iterations(max_iters),
+ started_(false),
+ finished_(false),
+ error_occurred_(false),
+ range_(ranges),
+ complexity_n_(0),
+ thread_index_(thread_i),
+ threads_(n_threads),
+ timer_(timer),
+ manager_(manager),
+ perf_counters_measurement_(perf_counters_measurement) {
+ BM_CHECK(max_iterations != 0) << "At least one iteration must be run";
+ BM_CHECK_LT(thread_index_, threads_)
+ << "thread_index must be less than threads";
+
+ // Note: The use of offsetof below is technically undefined until C++17
+ // because State is not a standard layout type. However, all compilers
+ // currently provide well-defined behavior as an extension (which is
+ // demonstrated since constexpr evaluation must diagnose all undefined
+  // behavior). GCC and Clang nevertheless warn about this use of offsetof,
+ // which must be suppressed.
+#if defined(__INTEL_COMPILER)
+#pragma warning push
+#pragma warning(disable : 1875)
+#elif defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+ // Offset tests to ensure commonly accessed data is on the first cache line.
+ const int cache_line_size = 64;
+ static_assert(offsetof(State, error_occurred_) <=
+ (cache_line_size - sizeof(error_occurred_)),
+ "");
+#if defined(__INTEL_COMPILER)
+#pragma warning pop
+#elif defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+void State::PauseTiming() {
+ // Add in time accumulated so far
+ BM_CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StopTimer();
+ if (perf_counters_measurement_) {
+ auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
+ for (const auto& name_and_measurement : measurements) {
+ auto name = name_and_measurement.first;
+ auto measurement = name_and_measurement.second;
+ BM_CHECK_EQ(counters[name], 0.0);
+ counters[name] = Counter(measurement, Counter::kAvgIterations);
+ }
+ }
+}
+
+void State::ResumeTiming() {
+ BM_CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StartTimer();
+ if (perf_counters_measurement_) {
+ perf_counters_measurement_->Start();
+ }
+}
+
+void State::SkipWithError(const char* msg) {
+ BM_CHECK(msg);
+ error_occurred_ = true;
+ {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ if (manager_->results.has_error_ == false) {
+ manager_->results.error_message_ = msg;
+ manager_->results.has_error_ = true;
+ }
+ }
+ total_iterations_ = 0;
+ if (timer_->running()) timer_->StopTimer();
+}
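+
+// Illustrative call site from a benchmark body (resource_ok is a hypothetical
+// condition; after skipping, the body should return or break out of its loop):
+//   if (!resource_ok) {
+//     state.SkipWithError("resource unavailable");
+//     return;
+//   }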
+
+void State::SetIterationTime(double seconds) {
+ timer_->SetIterationTime(seconds);
+}
+
+void State::SetLabel(const char* label) {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ manager_->results.report_label_ = label;
+}
+
+void State::StartKeepRunning() {
+ BM_CHECK(!started_ && !finished_);
+ started_ = true;
+ total_iterations_ = error_occurred_ ? 0 : max_iterations;
+ manager_->StartStopBarrier();
+ if (!error_occurred_) ResumeTiming();
+}
+
+void State::FinishKeepRunning() {
+ BM_CHECK(started_ && (!finished_ || error_occurred_));
+ if (!error_occurred_) {
+ PauseTiming();
+ }
+ // Total iterations has now wrapped around past 0. Fix this.
+ total_iterations_ = 0;
+ finished_ = true;
+ manager_->StartStopBarrier();
+}
+
+namespace internal {
+namespace {
+
+// Flushes streams after invoking reporter methods that write to them. This
+// ensures users get timely updates even when streams are not line-buffered.
+void FlushStreams(BenchmarkReporter* reporter) {
+ if (!reporter) return;
+ std::flush(reporter->GetOutputStream());
+ std::flush(reporter->GetErrorStream());
+}
+
+// Report the results through both the display and the file reporter.
+void Report(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter, const RunResults& run_results) {
+ auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
+ const RunResults& results) {
+ assert(reporter);
+ // If there are no aggregates, do output non-aggregates.
+ aggregates_only &= !results.aggregates_only.empty();
+ if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
+ if (!results.aggregates_only.empty())
+ reporter->ReportRuns(results.aggregates_only);
+ };
+
+ report_one(display_reporter, run_results.display_report_aggregates_only,
+ run_results);
+ if (file_reporter)
+ report_one(file_reporter, run_results.file_report_aggregates_only,
+ run_results);
+
+ FlushStreams(display_reporter);
+ FlushStreams(file_reporter);
+}
+
+void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
+ BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter) {
+ // Note the file_reporter can be null.
+ BM_CHECK(display_reporter != nullptr);
+
+ // Determine the width of the name field using a minimum width of 10.
+ bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
+ size_t name_field_width = 10;
+ size_t stat_field_width = 0;
+ for (const BenchmarkInstance& benchmark : benchmarks) {
+ name_field_width =
+ std::max<size_t>(name_field_width, benchmark.name().str().size());
+ might_have_aggregates |= benchmark.repetitions() > 1;
+
+ for (const auto& Stat : benchmark.statistics())
+ stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
+ }
+ if (might_have_aggregates) name_field_width += 1 + stat_field_width;
+
+ // Print header here
+ BenchmarkReporter::Context context;
+ context.name_field_width = name_field_width;
+
+ // Keep track of running times of all instances of each benchmark family.
+ std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
+ per_family_reports;
+
+ if (display_reporter->ReportContext(context) &&
+ (!file_reporter || file_reporter->ReportContext(context))) {
+ FlushStreams(display_reporter);
+ FlushStreams(file_reporter);
+
+ size_t num_repetitions_total = 0;
+
+ std::vector<internal::BenchmarkRunner> runners;
+ runners.reserve(benchmarks.size());
+ for (const BenchmarkInstance& benchmark : benchmarks) {
+ BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
+ if (benchmark.complexity() != oNone)
+ reports_for_family = &per_family_reports[benchmark.family_index()];
+
+ runners.emplace_back(benchmark, reports_for_family);
+ int num_repeats_of_this_instance = runners.back().GetNumRepeats();
+ num_repetitions_total += num_repeats_of_this_instance;
+ if (reports_for_family)
+ reports_for_family->num_runs_total += num_repeats_of_this_instance;
+ }
+ assert(runners.size() == benchmarks.size() && "Unexpected runner count.");
+
+ std::vector<size_t> repetition_indices;
+ repetition_indices.reserve(num_repetitions_total);
+ for (size_t runner_index = 0, num_runners = runners.size();
+ runner_index != num_runners; ++runner_index) {
+ const internal::BenchmarkRunner& runner = runners[runner_index];
+ std::fill_n(std::back_inserter(repetition_indices),
+ runner.GetNumRepeats(), runner_index);
+ }
+ assert(repetition_indices.size() == num_repetitions_total &&
+ "Unexpected number of repetition indexes.");
+
+ if (FLAGS_benchmark_enable_random_interleaving) {
+ std::random_device rd;
+ std::mt19937 g(rd());
+ std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
+ }
+
+ for (size_t repetition_index : repetition_indices) {
+ internal::BenchmarkRunner& runner = runners[repetition_index];
+ runner.DoOneRepetition();
+ if (runner.HasRepeatsRemaining()) continue;
+ // FIXME: report each repetition separately, not all of them in bulk.
+
+ RunResults run_results = runner.GetResults();
+
+ // Maybe calculate complexity report
+ if (const auto* reports_for_family = runner.GetReportsForFamily()) {
+ if (reports_for_family->num_runs_done ==
+ reports_for_family->num_runs_total) {
+ auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
+ run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+ additional_run_stats.begin(),
+ additional_run_stats.end());
+ per_family_reports.erase(
+ static_cast<int>(reports_for_family->Runs.front().family_index));
+ }
+ }
+
+ Report(display_reporter, file_reporter, run_results);
+ }
+ }
+ display_reporter->Finalize();
+ if (file_reporter) file_reporter->Finalize();
+ FlushStreams(display_reporter);
+ FlushStreams(file_reporter);
+}
+
+// Disable deprecated warnings temporarily because we need to reference
+// CSVReporter but don't want to trigger -Werror=deprecated-declarations.
+BENCHMARK_DISABLE_DEPRECATED_WARNING
+
+std::unique_ptr<BenchmarkReporter> CreateReporter(
+ std::string const& name, ConsoleReporter::OutputOptions output_opts) {
+ typedef std::unique_ptr<BenchmarkReporter> PtrType;
+ if (name == "console") {
+ return PtrType(new ConsoleReporter(output_opts));
+ } else if (name == "json") {
+ return PtrType(new JSONReporter);
+ } else if (name == "csv") {
+ return PtrType(new CSVReporter);
+ } else {
+ std::cerr << "Unexpected format: '" << name << "'\n";
+ std::exit(1);
+ }
+}
+
+BENCHMARK_RESTORE_DEPRECATED_WARNING
+
+} // end namespace
+
+bool IsZero(double n) {
+ return std::abs(n) < std::numeric_limits<double>::epsilon();
+}
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
+ int output_opts = ConsoleReporter::OO_Defaults;
+ auto is_benchmark_color = [force_no_color]() -> bool {
+ if (force_no_color) {
+ return false;
+ }
+ if (FLAGS_benchmark_color == "auto") {
+ return IsColorTerminal();
+ }
+ return IsTruthyFlagValue(FLAGS_benchmark_color);
+ };
+ if (is_benchmark_color()) {
+ output_opts |= ConsoleReporter::OO_Color;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Color;
+ }
+ if (FLAGS_benchmark_counters_tabular) {
+ output_opts |= ConsoleReporter::OO_Tabular;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Tabular;
+ }
+ return static_cast<ConsoleReporter::OutputOptions>(output_opts);
+}
+
+} // end namespace internal
+
+size_t RunSpecifiedBenchmarks() {
+ return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter);
+}
+
+size_t RunSpecifiedBenchmarks(std::string spec) {
+ return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec));
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
+ return RunSpecifiedBenchmarks(display_reporter, nullptr,
+ FLAGS_benchmark_filter);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ std::string spec) {
+ return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec));
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter) {
+ return RunSpecifiedBenchmarks(display_reporter, file_reporter,
+ FLAGS_benchmark_filter);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
+ BenchmarkReporter* file_reporter,
+ std::string spec) {
+ if (spec.empty() || spec == "all")
+ spec = "."; // Regexp that matches all benchmarks
+
+ // Setup the reporters
+ std::ofstream output_file;
+ std::unique_ptr<BenchmarkReporter> default_display_reporter;
+ std::unique_ptr<BenchmarkReporter> default_file_reporter;
+ if (!display_reporter) {
+ default_display_reporter = internal::CreateReporter(
+ FLAGS_benchmark_format, internal::GetOutputOptions());
+ display_reporter = default_display_reporter.get();
+ }
+ auto& Out = display_reporter->GetOutputStream();
+ auto& Err = display_reporter->GetErrorStream();
+
+ std::string const& fname = FLAGS_benchmark_out;
+ if (fname.empty() && file_reporter) {
+ Err << "A custom file reporter was provided but "
+ "--benchmark_out=<file> was not specified."
+ << std::endl;
+ std::exit(1);
+ }
+ if (!fname.empty()) {
+ output_file.open(fname);
+ if (!output_file.is_open()) {
+ Err << "invalid file name: '" << fname << "'" << std::endl;
+ std::exit(1);
+ }
+ if (!file_reporter) {
+ default_file_reporter = internal::CreateReporter(
+ FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
+ file_reporter = default_file_reporter.get();
+ }
+ file_reporter->SetOutputStream(&output_file);
+ file_reporter->SetErrorStream(&output_file);
+ }
+
+ std::vector<internal::BenchmarkInstance> benchmarks;
+ if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+
+ if (benchmarks.empty()) {
+ Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+ return 0;
+ }
+
+ if (FLAGS_benchmark_list_tests) {
+ for (auto const& benchmark : benchmarks)
+ Out << benchmark.name().str() << "\n";
+ } else {
+ internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
+ }
+
+ return benchmarks.size();
+}
+
+std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; }
+
+void RegisterMemoryManager(MemoryManager* manager) {
+ internal::memory_manager = manager;
+}
+
+void AddCustomContext(const std::string& key, const std::string& value) {
+ if (internal::global_context == nullptr) {
+ internal::global_context = new std::map<std::string, std::string>();
+ }
+ if (!internal::global_context->emplace(key, value).second) {
+ std::cerr << "Failed to add custom context \"" << key << "\" as it already "
+ << "exists with value \"" << value << "\"\n";
+ }
+}
+
+namespace internal {
+
+void PrintUsageAndExit() {
+ fprintf(stdout,
+ "benchmark"
+ " [--benchmark_list_tests={true|false}]\n"
+ " [--benchmark_filter=<regex>]\n"
+ " [--benchmark_min_time=<min_time>]\n"
+ " [--benchmark_repetitions=<num_repetitions>]\n"
+ " [--benchmark_enable_random_interleaving={true|false}]\n"
+ " [--benchmark_report_aggregates_only={true|false}]\n"
+ " [--benchmark_display_aggregates_only={true|false}]\n"
+ " [--benchmark_format=<console|json|csv>]\n"
+ " [--benchmark_out=<filename>]\n"
+ " [--benchmark_out_format=<json|console|csv>]\n"
+ " [--benchmark_color={auto|true|false}]\n"
+ " [--benchmark_counters_tabular={true|false}]\n"
+ " [--benchmark_perf_counters=<counter>,...]\n"
+ " [--benchmark_context=<key>=<value>,...]\n"
+ " [--v=<verbosity>]\n");
+ exit(0);
+}
+
+void ParseCommandLineFlags(int* argc, char** argv) {
+ using namespace benchmark;
+ BenchmarkReporter::Context::executable_name =
+ (argc && *argc > 0) ? argv[0] : "unknown";
+ for (int i = 1; argc && i < *argc; ++i) {
+ if (ParseBoolFlag(argv[i], "benchmark_list_tests",
+ &FLAGS_benchmark_list_tests) ||
+ ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
+ ParseDoubleFlag(argv[i], "benchmark_min_time",
+ &FLAGS_benchmark_min_time) ||
+ ParseInt32Flag(argv[i], "benchmark_repetitions",
+ &FLAGS_benchmark_repetitions) ||
+ ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
+ &FLAGS_benchmark_enable_random_interleaving) ||
+ ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
+ &FLAGS_benchmark_report_aggregates_only) ||
+ ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
+ &FLAGS_benchmark_display_aggregates_only) ||
+ ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
+ ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
+ ParseStringFlag(argv[i], "benchmark_out_format",
+ &FLAGS_benchmark_out_format) ||
+ ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
+ ParseBoolFlag(argv[i], "benchmark_counters_tabular",
+ &FLAGS_benchmark_counters_tabular) ||
+ ParseStringFlag(argv[i], "benchmark_perf_counters",
+ &FLAGS_benchmark_perf_counters) ||
+ ParseKeyValueFlag(argv[i], "benchmark_context",
+ &FLAGS_benchmark_context) ||
+ ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
+ for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
+
+ --(*argc);
+ --i;
+ } else if (IsFlag(argv[i], "help")) {
+ PrintUsageAndExit();
+ }
+ }
+ for (auto const* flag :
+ {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
+ if (*flag != "console" && *flag != "json" && *flag != "csv") {
+ PrintUsageAndExit();
+ }
+ }
+ if (FLAGS_benchmark_color.empty()) {
+ PrintUsageAndExit();
+ }
+ for (const auto& kv : FLAGS_benchmark_context) {
+ AddCustomContext(kv.first, kv.second);
+ }
+}
+
+int InitializeStreams() {
+ static std::ios_base::Init init;
+ return 0;
+}
+
+} // end namespace internal
+
+void Initialize(int* argc, char** argv) {
+ internal::ParseCommandLineFlags(argc, argv);
+ internal::LogLevel() = FLAGS_v;
+}
+
+void Shutdown() { delete internal::global_context; }
+
+bool ReportUnrecognizedArguments(int argc, char** argv) {
+ for (int i = 1; i < argc; ++i) {
+ fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
+ argv[i]);
+ }
+ return argc > 1;
+}
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/benchmark_api_internal.cc b/contrib/libs/benchmark/src/benchmark_api_internal.cc
new file mode 100644
index 00000000000..4de36e3c8ba
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_api_internal.cc
@@ -0,0 +1,112 @@
+#include "benchmark_api_internal.h"
+
+#include <cinttypes>
+
+#include "string_util.h"
+
+namespace benchmark {
+namespace internal {
+
+BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx,
+ int per_family_instance_idx,
+ const std::vector<int64_t>& args,
+ int thread_count)
+ : benchmark_(*benchmark),
+ family_index_(family_idx),
+ per_family_instance_index_(per_family_instance_idx),
+ aggregation_report_mode_(benchmark_.aggregation_report_mode_),
+ args_(args),
+ time_unit_(benchmark_.time_unit_),
+ measure_process_cpu_time_(benchmark_.measure_process_cpu_time_),
+ use_real_time_(benchmark_.use_real_time_),
+ use_manual_time_(benchmark_.use_manual_time_),
+ complexity_(benchmark_.complexity_),
+ complexity_lambda_(benchmark_.complexity_lambda_),
+ statistics_(benchmark_.statistics_),
+ repetitions_(benchmark_.repetitions_),
+ min_time_(benchmark_.min_time_),
+ iterations_(benchmark_.iterations_),
+ threads_(thread_count) {
+ name_.function_name = benchmark_.name_;
+
+ size_t arg_i = 0;
+ for (const auto& arg : args) {
+ if (!name_.args.empty()) {
+ name_.args += '/';
+ }
+
+ if (arg_i < benchmark->arg_names_.size()) {
+ const auto& arg_name = benchmark_.arg_names_[arg_i];
+ if (!arg_name.empty()) {
+ name_.args += StrFormat("%s:", arg_name.c_str());
+ }
+ }
+
+ name_.args += StrFormat("%" PRId64, arg);
+ ++arg_i;
+ }
+
+ if (!IsZero(benchmark->min_time_)) {
+ name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_);
+ }
+
+ if (benchmark_.iterations_ != 0) {
+ name_.iterations = StrFormat(
+ "iterations:%lu", static_cast<unsigned long>(benchmark_.iterations_));
+ }
+
+ if (benchmark_.repetitions_ != 0) {
+ name_.repetitions = StrFormat("repeats:%d", benchmark_.repetitions_);
+ }
+
+ if (benchmark_.measure_process_cpu_time_) {
+ name_.time_type = "process_time";
+ }
+
+ if (benchmark_.use_manual_time_) {
+ if (!name_.time_type.empty()) {
+ name_.time_type += '/';
+ }
+ name_.time_type += "manual_time";
+ } else if (benchmark_.use_real_time_) {
+ if (!name_.time_type.empty()) {
+ name_.time_type += '/';
+ }
+ name_.time_type += "real_time";
+ }
+
+ if (!benchmark_.thread_counts_.empty()) {
+ name_.threads = StrFormat("threads:%d", threads_);
+ }
+
+ setup_ = benchmark_.setup_;
+ teardown_ = benchmark_.teardown_;
+}
+
+State BenchmarkInstance::Run(
+ IterationCount iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager,
+ internal::PerfCountersMeasurement* perf_counters_measurement) const {
+ State st(iters, args_, thread_id, threads_, timer, manager,
+ perf_counters_measurement);
+ benchmark_.Run(st);
+ return st;
+}
+
+void BenchmarkInstance::Setup() const {
+ if (setup_) {
+ State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr,
+ nullptr);
+ setup_(st);
+ }
+}
+
+void BenchmarkInstance::Teardown() const {
+ if (teardown_) {
+ State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr,
+ nullptr);
+ teardown_(st);
+ }
+}
+} // namespace internal
+} // namespace benchmark
diff --git a/contrib/libs/benchmark/src/benchmark_api_internal.h b/contrib/libs/benchmark/src/benchmark_api_internal.h
new file mode 100644
index 00000000000..94c2b2972bb
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_api_internal.h
@@ -0,0 +1,84 @@
+#ifndef BENCHMARK_API_INTERNAL_H
+#define BENCHMARK_API_INTERNAL_H
+
+#include <cmath>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
+
+namespace benchmark {
+namespace internal {
+
+// Information kept per benchmark we may want to run
+class BenchmarkInstance {
+ public:
+ BenchmarkInstance(Benchmark* benchmark, int family_index,
+ int per_family_instance_index,
+ const std::vector<int64_t>& args, int threads);
+
+ const BenchmarkName& name() const { return name_; }
+ int family_index() const { return family_index_; }
+ int per_family_instance_index() const { return per_family_instance_index_; }
+ AggregationReportMode aggregation_report_mode() const {
+ return aggregation_report_mode_;
+ }
+ TimeUnit time_unit() const { return time_unit_; }
+ bool measure_process_cpu_time() const { return measure_process_cpu_time_; }
+ bool use_real_time() const { return use_real_time_; }
+ bool use_manual_time() const { return use_manual_time_; }
+ BigO complexity() const { return complexity_; }
+ BigOFunc* complexity_lambda() const { return complexity_lambda_; }
+ const std::vector<Statistics>& statistics() const { return statistics_; }
+ int repetitions() const { return repetitions_; }
+ double min_time() const { return min_time_; }
+ IterationCount iterations() const { return iterations_; }
+ int threads() const { return threads_; }
+ void Setup() const;
+ void Teardown() const;
+
+ State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager,
+ internal::PerfCountersMeasurement* perf_counters_measurement) const;
+
+ private:
+ BenchmarkName name_;
+ Benchmark& benchmark_;
+ const int family_index_;
+ const int per_family_instance_index_;
+ AggregationReportMode aggregation_report_mode_;
+ const std::vector<int64_t>& args_;
+ TimeUnit time_unit_;
+ bool measure_process_cpu_time_;
+ bool use_real_time_;
+ bool use_manual_time_;
+ BigO complexity_;
+ BigOFunc* complexity_lambda_;
+ UserCounters counters_;
+ const std::vector<Statistics>& statistics_;
+ int repetitions_;
+ double min_time_;
+ IterationCount iterations_;
+ int threads_; // Number of concurrent threads to use
+
+ typedef void (*callback_function)(const benchmark::State&);
+ callback_function setup_ = nullptr;
+ callback_function teardown_ = nullptr;
+};
+
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err);
+
+bool IsZero(double n);
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_API_INTERNAL_H
diff --git a/contrib/libs/benchmark/src/benchmark_name.cc b/contrib/libs/benchmark/src/benchmark_name.cc
new file mode 100644
index 00000000000..2a17ebce277
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_name.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <benchmark/benchmark.h>
+
+namespace benchmark {
+
+namespace {
+
+// Compute the total size of a pack of std::strings
+size_t size_impl() { return 0; }
+
+template <typename Head, typename... Tail>
+size_t size_impl(const Head& head, const Tail&... tail) {
+ return head.size() + size_impl(tail...);
+}
+
+// Join a pack of std::strings using a delimiter
+// TODO: use absl::StrJoin
+void join_impl(std::string&, char) {}
+
+template <typename Head, typename... Tail>
+void join_impl(std::string& s, const char delimiter, const Head& head,
+ const Tail&... tail) {
+ if (!s.empty() && !head.empty()) {
+ s += delimiter;
+ }
+
+ s += head;
+
+ join_impl(s, delimiter, tail...);
+}
+
+template <typename... Ts>
+std::string join(char delimiter, const Ts&... ts) {
+ std::string s;
+ s.reserve(sizeof...(Ts) + size_impl(ts...));
+ join_impl(s, delimiter, ts...);
+ return s;
+}
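+// Example of the join() above, assuming std::string arguments:
+//   join('/', std::string("a"), std::string(), std::string("b")) == "a/b"
+// Empty components contribute neither text nor a delimiter.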
+} // namespace
+
+std::string BenchmarkName::str() const {
+ return join('/', function_name, args, min_time, iterations, repetitions,
+ time_type, threads);
+}
+} // namespace benchmark
diff --git a/contrib/libs/benchmark/src/benchmark_register.cc b/contrib/libs/benchmark/src/benchmark_register.cc
new file mode 100644
index 00000000000..61a0c26178e
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_register.cc
@@ -0,0 +1,492 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_register.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cinttypes>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <numeric>
+#include <sstream>
+#include <thread>
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "check.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+// For non-dense Range, intermediate values are powers of kRangeMultiplier.
+static const int kRangeMultiplier = 8;
+// The size of a benchmark family is the number of inputs on which the
+// benchmark is repeated. If this is "large", warn the user during
+// configuration.
+static const size_t kMaxFamilySize = 100;
+} // end namespace
+
+namespace internal {
+
+//=============================================================================//
+// BenchmarkFamilies
+//=============================================================================//
+
+// Class for managing registered benchmarks. Note that each registered
+// benchmark identifies a family of related benchmarks to run.
+class BenchmarkFamilies {
+ public:
+ static BenchmarkFamilies* GetInstance();
+
+ // Registers a benchmark family and returns the index assigned to it.
+ size_t AddBenchmark(std::unique_ptr<Benchmark> family);
+
+ // Clear all registered benchmark families.
+ void ClearBenchmarks();
+
+ // Extract the list of benchmark instances that match the specified
+ // regular expression.
+ bool FindBenchmarks(std::string re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err);
+
+ private:
+ BenchmarkFamilies() {}
+
+ std::vector<std::unique_ptr<Benchmark>> families_;
+ Mutex mutex_;
+};
+
+BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
+ static BenchmarkFamilies instance;
+ return &instance;
+}
+
+size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
+ MutexLock l(mutex_);
+ size_t index = families_.size();
+ families_.push_back(std::move(family));
+ return index;
+}
+
+void BenchmarkFamilies::ClearBenchmarks() {
+ MutexLock l(mutex_);
+ families_.clear();
+ families_.shrink_to_fit();
+}
+
+bool BenchmarkFamilies::FindBenchmarks(
+ std::string spec, std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* ErrStream) {
+ BM_CHECK(ErrStream);
+ auto& Err = *ErrStream;
+ // Make regular expression out of command-line flag
+ std::string error_msg;
+ Regex re;
+ bool isNegativeFilter = false;
+ if (spec[0] == '-') {
+ spec.replace(0, 1, "");
+ isNegativeFilter = true;
+ }
+ if (!re.Init(spec, &error_msg)) {
+ Err << "Could not compile benchmark re: " << error_msg << std::endl;
+ return false;
+ }
+
+ // Special list of thread counts to use when none are specified
+ const std::vector<int> one_thread = {1};
+
+ int next_family_index = 0;
+
+ MutexLock l(mutex_);
+ for (std::unique_ptr<Benchmark>& family : families_) {
+ int family_index = next_family_index;
+ int per_family_instance_index = 0;
+
+ // Skip families that have been deleted.
+ if (!family) continue;
+
+ if (family->ArgsCnt() == -1) {
+ family->Args({});
+ }
+ const std::vector<int>* thread_counts =
+ (family->thread_counts_.empty()
+ ? &one_thread
+ : &static_cast<const std::vector<int>&>(family->thread_counts_));
+ const size_t family_size = family->args_.size() * thread_counts->size();
+ // The benchmark will be run on at least 'family_size' different inputs.
+ // If 'family_size' is very large, warn the user.
+ if (family_size > kMaxFamilySize) {
+ Err << "The number of inputs is very large. " << family->name_
+ << " will be repeated at least " << family_size << " times.\n";
+ }
+ // Reserve in the special case of the regex ".", since we then know the
+ // final family size.
+ if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
+
+ for (auto const& args : family->args_) {
+ for (int num_threads : *thread_counts) {
+ BenchmarkInstance instance(family.get(), family_index,
+ per_family_instance_index, args,
+ num_threads);
+
+ const auto full_name = instance.name().str();
+ if ((re.Match(full_name) && !isNegativeFilter) ||
+ (!re.Match(full_name) && isNegativeFilter)) {
+ benchmarks->push_back(std::move(instance));
+
+ ++per_family_instance_index;
+
+ // Only bump the next family index once we've established that
+ // at least one instance of this family will be run.
+ if (next_family_index == family_index) ++next_family_index;
+ }
+ }
+ }
+ }
+ return true;
+}
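+// Illustrative filter specs (BM_memcpy is a hypothetical benchmark name):
+// the spec "BM_memcpy/.*" selects only the BM_memcpy instances, while the
+// negative spec "-BM_memcpy" inverts the match and selects everything else.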
+
+Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
+ std::unique_ptr<Benchmark> bench_ptr(bench);
+ BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
+ families->AddBenchmark(std::move(bench_ptr));
+ return bench;
+}
+
+// FIXME: This function is a hack so that benchmark.cc can access
+// `BenchmarkFamilies`
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<BenchmarkInstance>* benchmarks,
+ std::ostream* Err) {
+ return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
+}
+
+//=============================================================================//
+// Benchmark
+//=============================================================================//
+
+Benchmark::Benchmark(const char* name)
+ : name_(name),
+ aggregation_report_mode_(ARM_Unspecified),
+ time_unit_(kNanosecond),
+ range_multiplier_(kRangeMultiplier),
+ min_time_(0),
+ iterations_(0),
+ repetitions_(0),
+ measure_process_cpu_time_(false),
+ use_real_time_(false),
+ use_manual_time_(false),
+ complexity_(oNone),
+ complexity_lambda_(nullptr),
+ setup_(nullptr),
+ teardown_(nullptr) {
+ ComputeStatistics("mean", StatisticsMean);
+ ComputeStatistics("median", StatisticsMedian);
+ ComputeStatistics("stddev", StatisticsStdDev);
+ ComputeStatistics("cv", StatisticsCV, kPercentage);
+}
+
+Benchmark::~Benchmark() {}
+
+Benchmark* Benchmark::Name(const std::string& name) {
+ SetName(name.c_str());
+ return this;
+}
+
+Benchmark* Benchmark::Arg(int64_t x) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ args_.push_back({x});
+ return this;
+}
+
+Benchmark* Benchmark::Unit(TimeUnit unit) {
+ time_unit_ = unit;
+ return this;
+}
+
+Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ std::vector<int64_t> arglist;
+ AddRange(&arglist, start, limit, range_multiplier_);
+
+ for (int64_t i : arglist) {
+ args_.push_back({i});
+ }
+ return this;
+}
+
+Benchmark* Benchmark::Ranges(
+ const std::vector<std::pair<int64_t, int64_t>>& ranges) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+ std::vector<std::vector<int64_t>> arglists(ranges.size());
+ for (std::size_t i = 0; i < ranges.size(); i++) {
+ AddRange(&arglists[i], ranges[i].first, ranges[i].second,
+ range_multiplier_);
+ }
+
+ ArgsProduct(arglists);
+
+ return this;
+}
+
+Benchmark* Benchmark::ArgsProduct(
+ const std::vector<std::vector<int64_t>>& arglists) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
+
+ std::vector<std::size_t> indices(arglists.size());
+ const std::size_t total = std::accumulate(
+ std::begin(arglists), std::end(arglists), std::size_t{1},
+ [](const std::size_t res, const std::vector<int64_t>& arglist) {
+ return res * arglist.size();
+ });
+ std::vector<int64_t> args;
+ args.reserve(arglists.size());
+ for (std::size_t i = 0; i < total; i++) {
+ for (std::size_t arg = 0; arg < arglists.size(); arg++) {
+ args.push_back(arglists[arg][indices[arg]]);
+ }
+ args_.push_back(args);
+ args.clear();
+
+ std::size_t arg = 0;
+ do {
+ indices[arg] = (indices[arg] + 1) % arglists[arg].size();
+ } while (indices[arg++] == 0 && arg < arglists.size());
+ }
+
+ return this;
+}
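+// Illustrative example (BM_SetInsert is a hypothetical benchmark):
+//   BENCHMARK(BM_SetInsert)->ArgsProduct({{1024, 8192}, {128, 512}});
+// registers the four argument tuples
+//   {1024, 128}, {8192, 128}, {1024, 512}, {8192, 512}
+// The odometer loop above walks the full cross product, advancing the
+// first argument list fastest.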
+
+Benchmark* Benchmark::ArgName(const std::string& name) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ arg_names_ = {name};
+ return this;
+}
+
+Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
+ arg_names_ = names;
+ return this;
+}
+
+Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ BM_CHECK_LE(start, limit);
+ for (int64_t arg = start; arg <= limit; arg += step) {
+ args_.push_back({arg});
+ }
+ return this;
+}
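+// Illustrative example: DenseRange(0, 10, 2) registers one benchmark per
+// arg in {0, 2, 4, 6, 8, 10}.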
+
+Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
+ BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
+ args_.push_back(args);
+ return this;
+}
+
+Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
+ custom_arguments(this);
+ return this;
+}
+
+Benchmark* Benchmark::Setup(void (*setup)(const benchmark::State&)) {
+ BM_CHECK(setup != nullptr);
+ setup_ = setup;
+ return this;
+}
+
+Benchmark* Benchmark::Teardown(void (*teardown)(const benchmark::State&)) {
+ BM_CHECK(teardown != nullptr);
+ teardown_ = teardown;
+ return this;
+}
+
+Benchmark* Benchmark::RangeMultiplier(int multiplier) {
+ BM_CHECK(multiplier > 1);
+ range_multiplier_ = multiplier;
+ return this;
+}
+
+Benchmark* Benchmark::MinTime(double t) {
+ BM_CHECK(t > 0.0);
+ BM_CHECK(iterations_ == 0);
+ min_time_ = t;
+ return this;
+}
+
+Benchmark* Benchmark::Iterations(IterationCount n) {
+ BM_CHECK(n > 0);
+ BM_CHECK(IsZero(min_time_));
+ iterations_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::Repetitions(int n) {
+ BM_CHECK(n > 0);
+ repetitions_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
+ aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
+ return this;
+}
+
+Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
+ // If we were called, the report mode is no longer 'unspecified', in any case.
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_Default);
+
+ if (value) {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
+ } else {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
+ }
+
+ return this;
+}
+
+Benchmark* Benchmark::MeasureProcessCPUTime() {
+ // Can be used together with UseRealTime() / UseManualTime().
+ measure_process_cpu_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::UseRealTime() {
+ BM_CHECK(!use_manual_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_real_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::UseManualTime() {
+ BM_CHECK(!use_real_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_manual_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigO complexity) {
+ complexity_ = complexity;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
+ complexity_lambda_ = complexity;
+ complexity_ = oLambda;
+ return this;
+}
+
+Benchmark* Benchmark::ComputeStatistics(const std::string& name,
+ StatisticsFunc* statistics,
+ StatisticUnit unit) {
+ statistics_.emplace_back(name, statistics, unit);
+ return this;
+}
+
+Benchmark* Benchmark::Threads(int t) {
+ BM_CHECK_GT(t, 0);
+ thread_counts_.push_back(t);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
+ BM_CHECK_GT(min_threads, 0);
+ BM_CHECK_GE(max_threads, min_threads);
+
+ AddRange(&thread_counts_, min_threads, max_threads, 2);
+ return this;
+}
+
+Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
+ int stride) {
+ BM_CHECK_GT(min_threads, 0);
+ BM_CHECK_GE(max_threads, min_threads);
+ BM_CHECK_GE(stride, 1);
+
+ for (auto i = min_threads; i < max_threads; i += stride) {
+ thread_counts_.push_back(i);
+ }
+ thread_counts_.push_back(max_threads);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadPerCpu() {
+ thread_counts_.push_back(CPUInfo::Get().num_cpus);
+ return this;
+}
+
+void Benchmark::SetName(const char* name) { name_ = name; }
+
+int Benchmark::ArgsCnt() const {
+ if (args_.empty()) {
+ if (arg_names_.empty()) return -1;
+ return static_cast<int>(arg_names_.size());
+ }
+ return static_cast<int>(args_.front().size());
+}
+
+//=============================================================================//
+// FunctionBenchmark
+//=============================================================================//
+
+void FunctionBenchmark::Run(State& st) { func_(st); }
+
+} // end namespace internal
+
+void ClearRegisteredBenchmarks() {
+ internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
+}
+
+std::vector<int64_t> CreateRange(int64_t lo, int64_t hi, int multi) {
+ std::vector<int64_t> args;
+ internal::AddRange(&args, lo, hi, multi);
+ return args;
+}
+
+std::vector<int64_t> CreateDenseRange(int64_t start, int64_t limit, int step) {
+ BM_CHECK_LE(start, limit);
+ std::vector<int64_t> args;
+ for (int64_t arg = start; arg <= limit; arg += step) {
+ args.push_back(arg);
+ }
+ return args;
+}
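+// Illustrative usage (BM_Foo is a hypothetical benchmark): these helpers
+// pair naturally with ArgsProduct, e.g.
+//   BENCHMARK(BM_Foo)->ArgsProduct({
+//       benchmark::CreateRange(8, 128, /*multi=*/2),      // 8, 16, 32, 64, 128
+//       benchmark::CreateDenseRange(1, 4, /*step=*/1)});  // 1, 2, 3, 4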
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/benchmark_register.h b/contrib/libs/benchmark/src/benchmark_register.h
new file mode 100644
index 00000000000..d3f4974e907
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_register.h
@@ -0,0 +1,108 @@
+#ifndef BENCHMARK_REGISTER_H
+#define BENCHMARK_REGISTER_H
+
+#include <limits>
+#include <vector>
+
+#include "check.h"
+
+namespace benchmark {
+namespace internal {
+
+// Append the powers of 'mult' in the closed interval [lo, hi].
+// Returns an iterator to the start of the inserted range.
+template <typename T>
+typename std::vector<T>::iterator AddPowers(std::vector<T>* dst, T lo, T hi,
+ int mult) {
+ BM_CHECK_GE(lo, 0);
+ BM_CHECK_GE(hi, lo);
+ BM_CHECK_GE(mult, 2);
+
+ const size_t start_offset = dst->size();
+
+ static const T kmax = std::numeric_limits<T>::max();
+
+ // Space out the values in multiples of "mult"
+ for (T i = static_cast<T>(1); i <= hi; i *= mult) {
+ if (i >= lo) {
+ dst->push_back(i);
+ }
+ // Break the loop here since multiplying by
+ // 'mult' would move outside of the range of T
+ if (i > kmax / mult) break;
+ }
+
+ return dst->begin() + start_offset;
+}
+
+template <typename T>
+void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+ // Since we negate lo and hi, neither may equal 'min' (it cannot be negated).
+ BM_CHECK_GT(lo, std::numeric_limits<T>::min());
+ BM_CHECK_GT(hi, std::numeric_limits<T>::min());
+ BM_CHECK_GE(hi, lo);
+ BM_CHECK_LE(hi, 0);
+
+ // Add positive powers, then negate and reverse.
+ // Casts necessary since small integers get promoted
+ // to 'int' when negating.
+ const auto lo_complement = static_cast<T>(-lo);
+ const auto hi_complement = static_cast<T>(-hi);
+
+ const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
+
+ std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+ std::reverse(it, dst->end());
+}
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Args type must be a signed integer");
+
+ BM_CHECK_GE(hi, lo);
+ BM_CHECK_GE(mult, 2);
+
+ // Add "lo"
+ dst->push_back(lo);
+
+ // Handle lo == hi as a special case, so we then know
+ // lo < hi and so it is safe to add 1 to lo and subtract 1
+ // from hi without falling outside of the range of T.
+ if (lo == hi) return;
+
+ // Ensure that lo_inner <= hi_inner below.
+ if (lo + 1 == hi) {
+ dst->push_back(hi);
+ return;
+ }
+
+ // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
+ const auto lo_inner = static_cast<T>(lo + 1);
+ const auto hi_inner = static_cast<T>(hi - 1);
+
+ // Insert negative values
+ if (lo_inner < 0) {
+ AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
+ }
+
+ // Treat 0 as a special case (see discussion on #762).
+ if (lo < 0 && hi >= 0) {
+ dst->push_back(0);
+ }
+
+ // Insert positive values
+ if (hi_inner > 0) {
+ AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
+ }
+
+ // Add "hi" (if different from last value).
+ if (hi != dst->back()) {
+ dst->push_back(hi);
+ }
+}
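+// Worked example: AddRange(&dst, -8, 8, 2) appends
+//   -8, -4, -2, -1, 0, 1, 2, 4, 8
+// i.e. both endpoints, the intermediate powers of 'mult' on either side of
+// zero, and zero itself.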
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_REGISTER_H
diff --git a/contrib/libs/benchmark/src/benchmark_runner.cc b/contrib/libs/benchmark/src/benchmark_runner.cc
new file mode 100644
index 00000000000..617973a78d3
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_runner.cc
@@ -0,0 +1,361 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_runner.h"
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "perf_counters.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+namespace benchmark {
+
+namespace internal {
+
+MemoryManager* memory_manager = nullptr;
+
+namespace {
+
+static constexpr IterationCount kMaxIterations = 1000000000;
+
+BenchmarkReporter::Run CreateRunReport(
+ const benchmark::internal::BenchmarkInstance& b,
+ const internal::ThreadManager::Result& results,
+ IterationCount memory_iterations,
+ const MemoryManager::Result* memory_result, double seconds,
+ int64_t repetition_index, int64_t repeats) {
+ // Create report about this benchmark run.
+ BenchmarkReporter::Run report;
+
+ report.run_name = b.name();
+ report.family_index = b.family_index();
+ report.per_family_instance_index = b.per_family_instance_index();
+ report.error_occurred = results.has_error_;
+ report.error_message = results.error_message_;
+ report.report_label = results.report_label_;
+ // This is the total iterations across all threads.
+ report.iterations = results.iterations;
+ report.time_unit = b.time_unit();
+ report.threads = b.threads();
+ report.repetition_index = repetition_index;
+ report.repetitions = repeats;
+
+ if (!report.error_occurred) {
+ // This is the total time across all threads.
+ if (b.use_manual_time()) {
+ report.real_accumulated_time = results.manual_time_used;
+ } else {
+ report.real_accumulated_time = results.real_time_used;
+ }
+ report.cpu_accumulated_time = results.cpu_time_used;
+ report.complexity_n = results.complexity_n;
+ report.complexity = b.complexity();
+ report.complexity_lambda = b.complexity_lambda();
+ report.statistics = &b.statistics();
+ report.counters = results.counters;
+
+ if (memory_iterations > 0) {
+ assert(memory_result != nullptr);
+ report.memory_result = memory_result;
+ report.allocs_per_iter =
+ memory_iterations ? static_cast<double>(memory_result->num_allocs) /
+ memory_iterations
+ : 0;
+ }
+
+ internal::Finish(&report.counters, results.iterations, seconds,
+ b.threads());
+ }
+ return report;
+}
+
+// Execute one thread of benchmark b for the specified number of iterations.
+// Adds the stats collected for the thread into manager->results.
+void RunInThread(const BenchmarkInstance* b, IterationCount iters,
+ int thread_id, ThreadManager* manager,
+ PerfCountersMeasurement* perf_counters_measurement) {
+ internal::ThreadTimer timer(
+ b->measure_process_cpu_time()
+ ? internal::ThreadTimer::CreateProcessCpuTime()
+ : internal::ThreadTimer::Create());
+ State st =
+ b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
+ BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+ << "Benchmark returned before State::KeepRunning() returned false!";
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ internal::ThreadManager::Result& results = manager->results;
+ results.iterations += st.iterations();
+ results.cpu_time_used += timer.cpu_time_used();
+ results.real_time_used += timer.real_time_used();
+ results.manual_time_used += timer.manual_time_used();
+ results.complexity_n += st.complexity_length_n();
+ internal::Increment(&results.counters, st.counters);
+ }
+ manager->NotifyThreadComplete();
+}
+
+} // end namespace
+
+BenchmarkRunner::BenchmarkRunner(
+ const benchmark::internal::BenchmarkInstance& b_,
+ BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
+ : b(b_),
+ reports_for_family(reports_for_family_),
+ min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
+ repeats(b.repetitions() != 0 ? b.repetitions()
+ : FLAGS_benchmark_repetitions),
+ has_explicit_iteration_count(b.iterations() != 0),
+ pool(b.threads() - 1),
+ iters(has_explicit_iteration_count ? b.iterations() : 1),
+ perf_counters_measurement(
+ PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))),
+ perf_counters_measurement_ptr(perf_counters_measurement.IsValid()
+ ? &perf_counters_measurement
+ : nullptr) {
+ run_results.display_report_aggregates_only =
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
+ run_results.file_report_aggregates_only =
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
+ run_results.display_report_aggregates_only =
+ (b.aggregation_report_mode() &
+ internal::ARM_DisplayReportAggregatesOnly);
+ run_results.file_report_aggregates_only =
+ (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
+ BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
+ perf_counters_measurement.IsValid())
+ << "Perf counters were requested but could not be set up.";
+ }
+}
+
+BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
+ BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";
+
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads()));
+
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get(), perf_counters_measurement_ptr);
+ }
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);
+
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
+
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
+
+ // And get rid of the manager.
+ manager.reset();
+
+ // If we were measuring whole-process CPU usage then each thread reports
+ // total CPU time of all threads. Divide by threads to get real value.
+ if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();
+
+ BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
+
+ // By using KeepRunningBatch a benchmark can iterate more times than
+ // requested, so take the iteration count from i.results.
+ i.iters = i.results.iterations / b.threads();
+
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time()) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time()) {
+ i.seconds = i.results.real_time_used;
+ }
+
+ // Adjust time stats to average since they were reported by all threads.
+ i.seconds /= b.threads();
+
+ return i;
+}
+
+IterationCount BenchmarkRunner::PredictNumItersNeeded(
+ const IterationResults& i) const {
+ // See by how much the iteration count should be increased.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly.
+ // Otherwise we use at most 10 times expansion.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ bool is_significant = (i.seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : 10.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const IterationCount max_next_iters = static_cast<IterationCount>(
+ std::lround(std::max(multiplier * static_cast<double>(i.iters),
+ static_cast<double>(i.iters) + 1.0)));
+ // But we do have *some* sanity limits, though.
+ const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+ BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ return next_iters; // Already rounded up above via std::lround().
+}
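+// Worked example of the estimate above, assuming a min_time of 0.5s: if
+// 1000 iterations took 0.005s, the run is insignificant (0.005 / 0.5 <= 0.1),
+// so the next attempt uses the capped multiplier: 10 * 1000 = 10000
+// iterations. If 1000 iterations took 0.1s, the run is significant
+// (0.2 > 0.1), so multiplier = 0.5 * 1.4 / 0.1 = 7 and the next attempt
+// runs 7000 iterations.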
+
+bool BenchmarkRunner::ShouldReportIterationResults(
+ const IterationResults& i) const {
+ // Determine if this run should be reported:
+ // either it has run for a sufficient amount of time,
+ // or an error was reported.
+ return i.results.has_error_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >= min_time || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+ // Note that user-provided timers are exempt from this sanity check.
+ ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time());
+}
+
+void BenchmarkRunner::DoOneRepetition() {
+ assert(HasRepeatsRemaining() && "Already done all repetitions?");
+
+ const bool is_the_first_repetition = num_repetitions_done == 0;
+ IterationResults i;
+
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+ // Please do note that if there are repetitions, the iteration count
+ // is *only* calculated for the *first* repetition, and other repetitions
+ // simply use that precomputed iteration count.
+ for (;;) {
+ b.Setup();
+ i = DoNIterations();
+ b.Teardown();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+ // it has calculated the correct iteration time, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
+
+ // Oh, one last thing: we also need to produce the 'memory measurements'.
+ MemoryManager::Result* memory_result = nullptr;
+ IterationCount memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an
+ // optional so we don't have to own the Result here.
+ // Can't do it now due to cxx03.
+ memory_results.push_back(MemoryManager::Result());
+ memory_result = &memory_results.back();
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<IterationCount>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ b.Setup();
+ RunInThread(&b, memory_iterations, 0, manager.get(),
+ perf_counters_measurement_ptr);
+ manager->WaitForAllThreads();
+ manager.reset();
+ b.Teardown();
+
+ BENCHMARK_DISABLE_DEPRECATED_WARNING
+ memory_manager->Stop(memory_result);
+ BENCHMARK_RESTORE_DEPRECATED_WARNING
+ }
+
+ // Ok, now actually report.
+ BenchmarkReporter::Run report =
+ CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
+ num_repetitions_done, repeats);
+
+ if (reports_for_family) {
+ ++reports_for_family->num_runs_done;
+ if (!report.error_occurred) reports_for_family->Runs.push_back(report);
+ }
+
+ run_results.non_aggregates.push_back(report);
+
+ ++num_repetitions_done;
+}
+
+RunResults&& BenchmarkRunner::GetResults() {
+ assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");
+
+ // Calculate additional statistics over the repetitions of this instance.
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+
+ return std::move(run_results);
+}
+
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/benchmark_runner.h b/contrib/libs/benchmark/src/benchmark_runner.h
new file mode 100644
index 00000000000..752eefdc26f
--- /dev/null
+++ b/contrib/libs/benchmark/src/benchmark_runner.h
@@ -0,0 +1,104 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RUNNER_H_
+#define BENCHMARK_RUNNER_H_
+
+#include <thread>
+#include <vector>
+
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+#include "perf_counters.h"
+#include "thread_manager.h"
+
+namespace benchmark {
+
+BM_DECLARE_double(benchmark_min_time);
+BM_DECLARE_int32(benchmark_repetitions);
+BM_DECLARE_bool(benchmark_report_aggregates_only);
+BM_DECLARE_bool(benchmark_display_aggregates_only);
+BM_DECLARE_string(benchmark_perf_counters);
+
+namespace internal {
+
+extern MemoryManager* memory_manager;
+
+struct RunResults {
+ std::vector<BenchmarkReporter::Run> non_aggregates;
+ std::vector<BenchmarkReporter::Run> aggregates_only;
+
+ bool display_report_aggregates_only = false;
+ bool file_report_aggregates_only = false;
+};
+
+class BenchmarkRunner {
+ public:
+ BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ BenchmarkReporter::PerFamilyRunReports* reports_for_family);
+
+ int GetNumRepeats() const { return repeats; }
+
+ bool HasRepeatsRemaining() const {
+ return GetNumRepeats() != num_repetitions_done;
+ }
+
+ void DoOneRepetition();
+
+ RunResults&& GetResults();
+
+ BenchmarkReporter::PerFamilyRunReports* GetReportsForFamily() const {
+ return reports_for_family;
+ }
+
+ private:
+ RunResults run_results;
+
+ const benchmark::internal::BenchmarkInstance& b;
+ BenchmarkReporter::PerFamilyRunReports* reports_for_family;
+
+ const double min_time;
+ const int repeats;
+ const bool has_explicit_iteration_count;
+
+ int num_repetitions_done = 0;
+
+ std::vector<std::thread> pool;
+
+ std::vector<MemoryManager::Result> memory_results;
+
+ IterationCount iters; // preserved between repetitions!
+ // So only the first repetition has to find/calculate it,
+ // the other repetitions will just use that precomputed iteration count.
+
+ PerfCountersMeasurement perf_counters_measurement;
+ PerfCountersMeasurement* const perf_counters_measurement_ptr;
+
+ struct IterationResults {
+ internal::ThreadManager::Result results;
+ IterationCount iters;
+ double seconds;
+ };
+ IterationResults DoNIterations();
+
+ IterationCount PredictNumItersNeeded(const IterationResults& i) const;
+
+ bool ShouldReportIterationResults(const IterationResults& i) const;
+};
+
+} // namespace internal
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RUNNER_H_
diff --git a/contrib/libs/benchmark/src/check.h b/contrib/libs/benchmark/src/check.h
new file mode 100644
index 00000000000..90c7bbfa4b4
--- /dev/null
+++ b/contrib/libs/benchmark/src/check.h
@@ -0,0 +1,90 @@
+#ifndef CHECK_H_
+#define CHECK_H_
+
+#include <cmath>
+#include <cstdlib>
+#include <ostream>
+
+#include "internal_macros.h"
+#include "log.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef void(AbortHandlerT)();
+
+inline AbortHandlerT*& GetAbortHandler() {
+ static AbortHandlerT* handler = &std::abort;
+ return handler;
+}
+
+BENCHMARK_NORETURN inline void CallAbortHandler() {
+ GetAbortHandler()();
+ std::abort(); // fallback to enforce noreturn
+}
+
+// CheckHandler is the class constructed by failing BM_CHECK macros.
+// CheckHandler will log information about the failures and abort when it is
+// destructed.
+class CheckHandler {
+ public:
+ CheckHandler(const char* check, const char* file, const char* func, int line)
+ : log_(GetErrorLogInstance()) {
+ log_ << file << ":" << line << ": " << func << ": Check `" << check
+ << "' failed. ";
+ }
+
+ LogType& GetLog() { return log_; }
+
+#if defined(COMPILER_MSVC)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+ BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
+ log_ << std::endl;
+ CallAbortHandler();
+ }
+#if defined(COMPILER_MSVC)
+#pragma warning(pop)
+#endif
+
+ CheckHandler& operator=(const CheckHandler&) = delete;
+ CheckHandler(const CheckHandler&) = delete;
+ CheckHandler() = delete;
+
+ private:
+ LogType& log_;
+};
+
+} // end namespace internal
+} // end namespace benchmark
+
+// The BM_CHECK macro returns a std::ostream object that can have extra
+// information written to it.
+#ifndef NDEBUG
+#define BM_CHECK(b) \
+ (b ? ::benchmark::internal::GetNullLogInstance() \
+ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
+ .GetLog())
+#else
+#define BM_CHECK(b) ::benchmark::internal::GetNullLogInstance()
+#endif
+
+// clang-format off
+// preserve whitespacing between operators for alignment
+#define BM_CHECK_EQ(a, b) BM_CHECK((a) == (b))
+#define BM_CHECK_NE(a, b) BM_CHECK((a) != (b))
+#define BM_CHECK_GE(a, b) BM_CHECK((a) >= (b))
+#define BM_CHECK_LE(a, b) BM_CHECK((a) <= (b))
+#define BM_CHECK_GT(a, b) BM_CHECK((a) > (b))
+#define BM_CHECK_LT(a, b) BM_CHECK((a) < (b))
+
+#define BM_CHECK_FLOAT_EQ(a, b, eps) BM_CHECK(std::fabs((a) - (b)) < (eps))
+#define BM_CHECK_FLOAT_NE(a, b, eps) BM_CHECK(std::fabs((a) - (b)) >= (eps))
+#define BM_CHECK_FLOAT_GE(a, b, eps) BM_CHECK((a) - (b) > -(eps))
+#define BM_CHECK_FLOAT_LE(a, b, eps) BM_CHECK((b) - (a) > -(eps))
+#define BM_CHECK_FLOAT_GT(a, b, eps) BM_CHECK((a) - (b) > (eps))
+#define BM_CHECK_FLOAT_LT(a, b, eps) BM_CHECK((b) - (a) > (eps))
+// clang-format on
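+
+// Illustrative usage: the returned stream lets call sites attach context,
+// e.g.
+//   BM_CHECK_GE(stride, 1) << "stride must be positive, got " << stride;
+// In NDEBUG builds BM_CHECK never evaluates the condition and streams the
+// message into the null log instance.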
+
+#endif // CHECK_H_
diff --git a/contrib/libs/benchmark/src/colorprint.cc b/contrib/libs/benchmark/src/colorprint.cc
new file mode 100644
index 00000000000..1a000a06373
--- /dev/null
+++ b/contrib/libs/benchmark/src/colorprint.cc
@@ -0,0 +1,188 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "colorprint.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <string>
+
+#include "check.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <io.h>
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif // BENCHMARK_OS_WINDOWS
+
+namespace benchmark {
+namespace {
+#ifdef BENCHMARK_OS_WINDOWS
+typedef WORD PlatformColorCode;
+#else
+typedef const char* PlatformColorCode;
+#endif
+
+PlatformColorCode GetPlatformColorCode(LogColor color) {
+#ifdef BENCHMARK_OS_WINDOWS
+ switch (color) {
+ case COLOR_RED:
+ return FOREGROUND_RED;
+ case COLOR_GREEN:
+ return FOREGROUND_GREEN;
+ case COLOR_YELLOW:
+ return FOREGROUND_RED | FOREGROUND_GREEN;
+ case COLOR_BLUE:
+ return FOREGROUND_BLUE;
+ case COLOR_MAGENTA:
+ return FOREGROUND_BLUE | FOREGROUND_RED;
+ case COLOR_CYAN:
+ return FOREGROUND_BLUE | FOREGROUND_GREEN;
+ case COLOR_WHITE: // fall through to default
+ default:
+ return 0;
+ }
+#else
+ switch (color) {
+ case COLOR_RED:
+ return "1";
+ case COLOR_GREEN:
+ return "2";
+ case COLOR_YELLOW:
+ return "3";
+ case COLOR_BLUE:
+ return "4";
+ case COLOR_MAGENTA:
+ return "5";
+ case COLOR_CYAN:
+ return "6";
+ case COLOR_WHITE:
+ return "7";
+ default:
+ return nullptr;
+ };
+#endif
+}
+
+} // end namespace
+
+std::string FormatString(const char* msg, va_list args) {
+ // we might need a second shot at this, so pre-emptively make a copy
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+ std::size_t size = 256;
+ char local_buff[256];
+ auto ret = vsnprintf(local_buff, size, msg, args_cp);
+
+ va_end(args_cp);
+
+ // currently there is no error handling for failure, so this is a hack.
+ BM_CHECK(ret >= 0);
+
+ if (ret == 0) // handle empty expansion
+ return {};
+ else if (static_cast<size_t>(ret) < size)
+ return local_buff;
+ else {
+ // we did not provide a long enough buffer on our first attempt.
+ size = static_cast<size_t>(ret) + 1; // + 1 for the null byte
+ std::unique_ptr<char[]> buff(new char[size]);
+ ret = vsnprintf(buff.get(), size, msg, args);
+ BM_CHECK(ret > 0 && (static_cast<size_t>(ret)) < size);
+ return buff.get();
+ }
+}
+
+std::string FormatString(const char* msg, ...) {
+ va_list args;
+ va_start(args, msg);
+ auto tmp = FormatString(msg, args);
+ va_end(args);
+ return tmp;
+}
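+// For example, FormatString("iters:%d", 42) returns the std::string
+// "iters:42"; expansions longer than the 256-byte stack buffer fall back
+// to the second, heap-allocated vsnprintf pass above.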
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ColorPrintf(out, color, fmt, args);
+ va_end(args);
+}
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args) {
+#ifdef BENCHMARK_OS_WINDOWS
+ ((void)out); // suppress unused warning
+
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ const char* color_code = GetPlatformColorCode(color);
+ if (color_code) out << FormatString("\033[0;3%sm", color_code);
+ out << FormatString(fmt, args) << "\033[m";
+#endif
+}
+
+bool IsColorTerminal() {
+#if BENCHMARK_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return 0 != _isatty(_fileno(stdout));
+#else
+ // On non-Windows platforms, we rely on the TERM variable. This list of
+ // supported TERM values is copied from Google Test:
+ // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
+ const char* const SUPPORTED_TERM_VALUES[] = {
+ "xterm", "xterm-color", "xterm-256color",
+ "screen", "screen-256color", "tmux",
+ "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
+ "linux", "cygwin",
+ };
+
+ const char* const term = getenv("TERM");
+
+ bool term_supports_color = false;
+ for (const char* candidate : SUPPORTED_TERM_VALUES) {
+ if (term && 0 == strcmp(term, candidate)) {
+ term_supports_color = true;
+ break;
+ }
+ }
+
+ return 0 != isatty(fileno(stdout)) && term_supports_color;
+#endif // BENCHMARK_OS_WINDOWS
+}
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/colorprint.h b/contrib/libs/benchmark/src/colorprint.h
new file mode 100644
index 00000000000..9f6fab9b342
--- /dev/null
+++ b/contrib/libs/benchmark/src/colorprint.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_COLORPRINT_H_
+#define BENCHMARK_COLORPRINT_H_
+
+#include <cstdarg>
+#include <iostream>
+#include <string>
+
+namespace benchmark {
+enum LogColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW,
+ COLOR_BLUE,
+ COLOR_MAGENTA,
+ COLOR_CYAN,
+ COLOR_WHITE
+};
+
+std::string FormatString(const char* msg, va_list args);
+std::string FormatString(const char* msg, ...);
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args);
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
+
+// Returns true if stdout appears to be a terminal that supports colored
+// output, false otherwise.
+bool IsColorTerminal();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COLORPRINT_H_
diff --git a/contrib/libs/benchmark/src/commandlineflags.cc b/contrib/libs/benchmark/src/commandlineflags.cc
new file mode 100644
index 00000000000..9615e351ffa
--- /dev/null
+++ b/contrib/libs/benchmark/src/commandlineflags.cc
@@ -0,0 +1,285 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "commandlineflags.h"
+
+#include <algorithm>
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <utility>
+
+#include "../src/string_util.h"
+
+namespace benchmark {
+namespace {
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
+ // Parses the string as a decimal integer.
+ char* end = nullptr;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const int32_t result = static_cast<int32_t>(long_value);
+ if (long_value == std::numeric_limits<long>::max() ||
+ long_value == std::numeric_limits<long>::min() ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\", "
+ << "which overflows.\n";
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
+
+// Parses 'str' for a double. If successful, writes the result to *value and
+// returns true; otherwise leaves *value unchanged and returns false.
+bool ParseDouble(const std::string& src_text, const char* str, double* value) {
+ // Parses the string as a double.
+ char* end = nullptr;
+ const double double_value = strtod(str, &end); // NOLINT
+
+ // Has strtod() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a double, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ *value = double_value;
+ return true;
+}
+
+// Parses 'str' into KV pairs. If successful, writes the result to *value and
+// returns true; otherwise leaves *value unchanged and returns false.
+bool ParseKvPairs(const std::string& src_text, const char* str,
+ std::map<std::string, std::string>* value) {
+ std::map<std::string, std::string> kvs;
+ for (const auto& kvpair : StrSplit(str, ',')) {
+ const auto kv = StrSplit(kvpair, '=');
+ if (kv.size() != 2) {
+ std::cerr << src_text << " is expected to be a comma-separated list of "
+ << "<key>=<value> strings, but actually has value \"" << str
+ << "\".\n";
+ return false;
+ }
+ if (!kvs.emplace(kv[0], kv[1]).second) {
+ std::cerr << src_text << " is expected to contain unique keys but key \""
+ << kv[0] << "\" was repeated.\n";
+ return false;
+ }
+ }
+
+ *value = kvs;
+ return true;
+}
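+// Illustrative example: parsing "cycles=100,instructions=200" fills *value
+// with {{"cycles", "100"}, {"instructions", "200"}}, while a repeated key
+// such as "cycles=1,cycles=2" is rejected.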
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("benchmark_foo") will return
+// "BENCHMARK_FOO".
+static std::string FlagToEnvVar(const char* flag) {
+ const std::string flag_str(flag);
+
+ std::string env_var;
+ for (size_t i = 0; i != flag_str.length(); ++i)
+ env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
+
+ return env_var;
+}
+
+} // namespace
+
+bool BoolFromEnv(const char* flag, bool default_val) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value_str = getenv(env_var.c_str());
+ return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
+}
+
+int32_t Int32FromEnv(const char* flag, int32_t default_val) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value_str = getenv(env_var.c_str());
+ int32_t value = default_val;
+ if (value_str == nullptr ||
+ !ParseInt32(std::string("Environment variable ") + env_var, value_str,
+ &value)) {
+ return default_val;
+ }
+ return value;
+}
+
+double DoubleFromEnv(const char* flag, double default_val) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value_str = getenv(env_var.c_str());
+ double value = default_val;
+ if (value_str == nullptr ||
+ !ParseDouble(std::string("Environment variable ") + env_var, value_str,
+ &value)) {
+ return default_val;
+ }
+ return value;
+}
+
+const char* StringFromEnv(const char* flag, const char* default_val) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value = getenv(env_var.c_str());
+ return value == nullptr ? default_val : value;
+}
+
+std::map<std::string, std::string> KvPairsFromEnv(
+ const char* flag, std::map<std::string, std::string> default_val) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value_str = getenv(env_var.c_str());
+
+ if (value_str == nullptr) return default_val;
+
+ std::map<std::string, std::string> value;
+ if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) {
+ return default_val;
+ }
+ return value;
+}
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or nullptr if the parsing failed.
+const char* ParseFlagValue(const char* str, const char* flag,
+ bool def_optional) {
+ // str and flag must not be nullptr.
+ if (str == nullptr || flag == nullptr) return nullptr;
+
+ // The flag must start with "--".
+ const std::string flag_str = std::string("--") + std::string(flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) return flag_end;
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return nullptr;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
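+// Illustrative example ("BM_foo" is a hypothetical filter value):
+//   ParseFlagValue("--benchmark_filter=BM_foo", "benchmark_filter",
+//                  /*def_optional=*/false)
+// returns a pointer to "BM_foo", while
+//   ParseFlagValue("--benchmark_list_tests", "benchmark_list_tests",
+//                  /*def_optional=*/true)
+// returns a pointer to the empty string.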
+
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Converts the string value to a bool.
+ *value = IsTruthyFlagValue(value_str);
+ return true;
+}
+
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseDouble(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ *value = value_str;
+ return true;
+}
+
+bool ParseKeyValueFlag(const char* str, const char* flag,
+ std::map<std::string, std::string>* value) {
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ if (value_str == nullptr) return false;
+
+ for (const auto& kvpair : StrSplit(value_str, ',')) {
+ const auto kv = StrSplit(kvpair, '=');
+ if (kv.size() != 2) return false;
+ value->emplace(kv[0], kv[1]);
+ }
+
+ return true;
+}
+
+bool IsFlag(const char* str, const char* flag) {
+ return (ParseFlagValue(str, flag, true) != nullptr);
+}
+
+bool IsTruthyFlagValue(const std::string& value) {
+ if (value.size() == 1) {
+ char v = value[0];
+ return isalnum(v) &&
+ !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
+ } else if (!value.empty()) {
+ std::string value_lower(value);
+ std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
+ [](char c) { return static_cast<char>(::tolower(c)); });
+ return !(value_lower == "false" || value_lower == "no" ||
+ value_lower == "off");
+ } else
+ return true;
+}
+
+} // end namespace benchmark
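
For orientation, the parsers above are meant to be driven one argv element at
a time. A minimal sketch, assuming it is compiled inside this tree so that
commandlineflags.h is on the include path; the flag names are hypothetical:

    #include <cstdint>
    #include <iostream>

    #include "commandlineflags.h"  // ParseBoolFlag, ParseInt32Flag, ...

    int main(int argc, char** argv) {
      bool verbose = false;      // hypothetical flags; defaults kept if absent
      int32_t repetitions = 1;
      for (int i = 1; i < argc; ++i) {
        // Each Parse*Flag returns true only if argv[i] matched
        // "--<flag>[=value]" and the value parsed; otherwise *value is
        // left untouched.
        if (benchmark::ParseBoolFlag(argv[i], "verbose", &verbose)) continue;
        if (benchmark::ParseInt32Flag(argv[i], "repetitions", &repetitions))
          continue;
        std::cerr << "unrecognized argument: " << argv[i] << "\n";
      }
      std::cout << "verbose=" << verbose
                << " repetitions=" << repetitions << "\n";
      return 0;
    }

Because ParseBoolFlag passes def_optional=true to ParseFlagValue, a bare
"--verbose" with no "=value" part counts as true.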
diff --git a/contrib/libs/benchmark/src/commandlineflags.h b/contrib/libs/benchmark/src/commandlineflags.h
new file mode 100644
index 00000000000..5baaf11784d
--- /dev/null
+++ b/contrib/libs/benchmark/src/commandlineflags.h
@@ -0,0 +1,116 @@
+#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
+#define BENCHMARK_COMMANDLINEFLAGS_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+// Macro for referencing flags.
+#define FLAG(name) FLAGS_##name
+
+// Macros for declaring flags.
+#define BM_DECLARE_bool(name) extern bool FLAG(name)
+#define BM_DECLARE_int32(name) extern int32_t FLAG(name)
+#define BM_DECLARE_double(name) extern double FLAG(name)
+#define BM_DECLARE_string(name) extern std::string FLAG(name)
+#define BM_DECLARE_kvpairs(name) \
+ extern std::map<std::string, std::string> FLAG(name)
+
+// Macros for defining flags.
+#define BM_DEFINE_bool(name, default_val) \
+ bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
+#define BM_DEFINE_int32(name, default_val) \
+ int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
+#define BM_DEFINE_double(name, default_val) \
+ double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
+#define BM_DEFINE_string(name, default_val) \
+ std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
+#define BM_DEFINE_kvpairs(name, default_val) \
+ std::map<std::string, std::string> FLAG(name) = \
+ benchmark::KvPairsFromEnv(#name, default_val)
+
+namespace benchmark {
+
+// Parses a bool from the environment variable corresponding to the given flag.
+//
+// If the variable exists, returns the result of IsTruthyFlagValue()
+// applied to its value; if not, returns the given default value.
+bool BoolFromEnv(const char* flag, bool default_val);
+
+// Parses an Int32 from the environment variable corresponding to the given
+// flag.
+//
+// If the variable exists, returns the value parsed by ParseInt32();
+// if not, returns the given default value.
+int32_t Int32FromEnv(const char* flag, int32_t default_val);
+
+// Parses a Double from the environment variable corresponding to the given
+// flag.
+//
+// If the variable exists, returns the value parsed by ParseDouble();
+// if not, returns the given default value.
+double DoubleFromEnv(const char* flag, double default_val);
+
+// Parses a string from the environment variable corresponding to the given
+// flag.
+//
+// If the variable exists, returns its value; if not, returns
+// the given default value.
+const char* StringFromEnv(const char* flag, const char* default_val);
+
+// Parses a set of kvpairs from the environment variable corresponding to the
+// given flag.
+//
+// If the variable exists, returns its value; if not, returns
+// the given default value.
+std::map<std::string, std::string> KvPairsFromEnv(
+ const char* flag, std::map<std::string, std::string> default_val);
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true if it passes IsTruthyFlagValue().
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
+
+// Parses a string for a Double flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseDoubleFlag(const char* str, const char* flag, double* value);
+
+// Parses a string for a string flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value);
+
+// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value"
+//
+// On success, stores the value of the flag in *value and returns true. On
+// failure returns false, though *value may have been mutated.
+bool ParseKeyValueFlag(const char* str, const char* flag,
+ std::map<std::string, std::string>* value);
+
+// Returns true if the string matches the flag.
+bool IsFlag(const char* str, const char* flag);
+
+// Returns true unless value is a single character equal to '0', 'f', 'F',
+// 'n' or 'N', or a single non-alphanumeric character. Also returns false if
+// the value matches one of 'no', 'false', 'off' (case-insensitive). As a
+// special case, also returns true if value is the empty string.
+bool IsTruthyFlagValue(const std::string& value);
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COMMANDLINEFLAGS_H_
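
The BM_DEFINE_* macros tie a FLAGS_<name> global to both a compile-time
default and the matching environment variable. A minimal sketch of a client
translation unit (hypothetical flag names; assumes this header is on the
include path):

    #include <cstdint>
    #include <string>

    #include "commandlineflags.h"

    // Each BM_DEFINE_* initializes FLAGS_<name> from the corresponding
    // environment variable (via the *FromEnv helpers declared above) and
    // falls back to the default given here.
    BM_DEFINE_bool(dry_run, false);
    BM_DEFINE_int32(max_iters, 1000);
    BM_DEFINE_string(out_path, "results.json");

    int main() {
      // FLAG(name) expands to FLAGS_name.
      if (!FLAG(dry_run) && FLAG(max_iters) > 0) {
        // ... run benchmarks, write results to FLAG(out_path) ...
      }
      return 0;
    }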
diff --git a/contrib/libs/benchmark/src/complexity.cc b/contrib/libs/benchmark/src/complexity.cc
new file mode 100644
index 00000000000..825c57394a8
--- /dev/null
+++ b/contrib/libs/benchmark/src/complexity.cc
@@ -0,0 +1,244 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#include "complexity.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+
+namespace benchmark {
+
+// Internal function to calculate the different scalability forms
+BigOFunc* FittingCurve(BigO complexity) {
+ static const double kLog2E = 1.44269504088896340736;
+ switch (complexity) {
+ case oN:
+ return [](IterationCount n) -> double { return static_cast<double>(n); };
+ case oNSquared:
+ return [](IterationCount n) -> double { return std::pow(n, 2); };
+ case oNCubed:
+ return [](IterationCount n) -> double { return std::pow(n, 3); };
+ case oLogN:
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return
+ [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
+ case oNLogN:
+ /* Note: can't use log2 because Android's GNU STL lacks it */
+ return [](IterationCount n) {
+ return kLog2E * n * log(static_cast<double>(n));
+ };
+ case o1:
+ default:
+ return [](IterationCount) { return 1.0; };
+ }
+}
+
+// Returns a string for the calculated complexity
+std::string GetBigOString(BigO complexity) {
+ switch (complexity) {
+ case oN:
+ return "N";
+ case oNSquared:
+ return "N^2";
+ case oNCubed:
+ return "N^3";
+ case oLogN:
+ return "lgN";
+ case oNLogN:
+ return "NlgN";
+ case o1:
+ return "(1)";
+ default:
+ return "f(N)";
+ }
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error, for the fitting curve
+// given by the lambda expression.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - fitting_curve : lambda expression (e.g. [](int64_t n) { return n; }).
+
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
+
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+ const std::vector<double>& time,
+ BigOFunc* fitting_curve) {
+ double sigma_gn_squared = 0.0;
+ double sigma_time = 0.0;
+ double sigma_time_gn = 0.0;
+
+ // Calculate least square fitting parameter
+ for (size_t i = 0; i < n.size(); ++i) {
+ double gn_i = fitting_curve(n[i]);
+ sigma_gn_squared += gn_i * gn_i;
+ sigma_time += time[i];
+ sigma_time_gn += time[i] * gn_i;
+ }
+
+ LeastSq result;
+ result.complexity = oLambda;
+
+ // Calculate complexity.
+ result.coef = sigma_time_gn / sigma_gn_squared;
+
+ // Calculate RMS
+ double rms = 0.0;
+ for (size_t i = 0; i < n.size(); ++i) {
+ double fit = result.coef * fitting_curve(n[i]);
+ rms += pow((time[i] - fit), 2);
+ }
+
+ // Normalized RMS by the mean of the observed values
+ double mean = sigma_time / n.size();
+ result.rms = sqrt(rms / n.size()) / mean;
+
+ return result;
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - complexity : If different than oAuto, the fitting curve will stick to
+//                this one. If it is oAuto, the best-fitting curve will be
+//                calculated automatically.
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+ const std::vector<double>& time, const BigO complexity) {
+ BM_CHECK_EQ(n.size(), time.size());
+  BM_CHECK_GE(n.size(), 2);  // Do not compute the fitting curve if fewer
+  // than two benchmark runs are given
+ BM_CHECK_NE(complexity, oNone);
+
+ LeastSq best_fit;
+
+ if (complexity == oAuto) {
+ std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
+
+ // Take o1 as default best fitting curve
+ best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
+ best_fit.complexity = o1;
+
+ // Compute all possible fitting curves and stick to the best one
+ for (const auto& fit : fit_curves) {
+ LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
+ if (current_fit.rms < best_fit.rms) {
+ best_fit = current_fit;
+ best_fit.complexity = fit;
+ }
+ }
+ } else {
+ best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
+ best_fit.complexity = complexity;
+ }
+
+ return best_fit;
+}
+
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ if (reports.size() < 2) return results;
+
+ // Accumulators.
+ std::vector<int64_t> n;
+ std::vector<double> real_time;
+ std::vector<double> cpu_time;
+
+ // Populate the accumulators.
+ for (const Run& run : reports) {
+ BM_CHECK_GT(run.complexity_n, 0)
+ << "Did you forget to call SetComplexityN?";
+ n.push_back(run.complexity_n);
+ real_time.push_back(run.real_accumulated_time / run.iterations);
+ cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+ }
+
+ LeastSq result_cpu;
+ LeastSq result_real;
+
+ if (reports[0].complexity == oLambda) {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+ result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+ } else {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+ result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+ }
+
+ // Drop the 'args' when reporting complexity.
+ auto run_name = reports[0].run_name;
+ run_name.args.clear();
+
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run big_o;
+ big_o.run_name = run_name;
+ big_o.family_index = reports[0].family_index;
+ big_o.per_family_instance_index = reports[0].per_family_instance_index;
+ big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ big_o.repetitions = reports[0].repetitions;
+ big_o.repetition_index = Run::no_repetition_index;
+ big_o.threads = reports[0].threads;
+ big_o.aggregate_name = "BigO";
+ big_o.aggregate_unit = StatisticUnit::kTime;
+ big_o.report_label = reports[0].report_label;
+ big_o.iterations = 0;
+ big_o.real_accumulated_time = result_real.coef;
+ big_o.cpu_accumulated_time = result_cpu.coef;
+ big_o.report_big_o = true;
+ big_o.complexity = result_cpu.complexity;
+
+ // All the time results are reported after being multiplied by the
+ // time unit multiplier. But since RMS is a relative quantity it
+ // should not be multiplied at all. So, here, we _divide_ it by the
+ // multiplier so that when it is multiplied later the result is the
+ // correct one.
+ double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
+
+  // Only add the label to mean/stddev if it is the same for all runs
+ Run rms;
+ rms.run_name = run_name;
+ rms.family_index = reports[0].family_index;
+ rms.per_family_instance_index = reports[0].per_family_instance_index;
+ rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ rms.aggregate_name = "RMS";
+ rms.aggregate_unit = StatisticUnit::kPercentage;
+ rms.report_label = big_o.report_label;
+ rms.iterations = 0;
+ rms.repetition_index = Run::no_repetition_index;
+ rms.repetitions = reports[0].repetitions;
+ rms.threads = reports[0].threads;
+ rms.real_accumulated_time = result_real.rms / multiplier;
+ rms.cpu_accumulated_time = result_cpu.rms / multiplier;
+ rms.report_rms = true;
+ rms.complexity = result_cpu.complexity;
+ // don't forget to keep the time unit, or we won't be able to
+ // recover the correct value.
+ rms.time_unit = reports[0].time_unit;
+
+ results.push_back(big_o);
+ results.push_back(rms);
+ return results;
+}
+
+} // end namespace benchmark
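
Concretely, the fit above reduces to coef = Σ(time_i · g(n_i)) / Σ(g(n_i)²),
with the reported RMS error normalized by the mean observed time. A
standalone sketch of the same arithmetic on synthetic, roughly linear data
(the numbers are illustrative):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Synthetic data: run time grows roughly linearly with size n.
      std::vector<int64_t> n = {1024, 2048, 4096, 8192};
      std::vector<double> time = {2.1e-3, 4.0e-3, 8.3e-3, 16.2e-3};

      // Fit time ~= coef * g(n) with g(n) = n, as MinimalLeastSq does
      // for the oN fitting curve.
      double sigma_gn_squared = 0.0, sigma_time = 0.0, sigma_time_gn = 0.0;
      for (size_t i = 0; i < n.size(); ++i) {
        double gn = static_cast<double>(n[i]);
        sigma_gn_squared += gn * gn;
        sigma_time += time[i];
        sigma_time_gn += time[i] * gn;
      }
      double coef = sigma_time_gn / sigma_gn_squared;

      // Root-mean-squared error, normalized by the mean observed time.
      double rms = 0.0;
      for (size_t i = 0; i < n.size(); ++i) {
        double fit = coef * static_cast<double>(n[i]);
        rms += std::pow(time[i] - fit, 2);
      }
      double mean = sigma_time / n.size();
      std::printf("coef=%g rms=%g\n", coef,
                  std::sqrt(rms / n.size()) / mean);
      return 0;
    }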
diff --git a/contrib/libs/benchmark/src/complexity.h b/contrib/libs/benchmark/src/complexity.h
new file mode 100644
index 00000000000..df29b48d29b
--- /dev/null
+++ b/contrib/libs/benchmark/src/complexity.h
@@ -0,0 +1,55 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#ifndef COMPLEXITY_H_
+#define COMPLEXITY_H_
+
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the bigO and RMS information for the specified
+// list of reports. If 'reports.size() < 2' an empty vector is returned.
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+// This data structure will contain the result returned by MinimalLeastSq
+// - coef        : Estimated coefficient for the high-order term as
+// interpolated from data.
+// - rms : Normalized Root Mean Squared Error.
+// - complexity   : Scalability form (e.g. oN, oNLogN). If a scalability
+//                  form was provided to MinimalLeastSq, this holds that
+//                  same value. If BigO::oAuto was selected, it holds the
+//                  best-fitting curve detected.
+
+struct LeastSq {
+ LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
+
+ double coef;
+ double rms;
+ BigO complexity;
+};
+
+// Returns a string for the calculated complexity
+std::string GetBigOString(BigO complexity);
+
+} // end namespace benchmark
+
+#endif // COMPLEXITY_H_
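
From the user's side, the BigO/RMS aggregates are requested through the
public benchmark API rather than by calling ComputeBigO directly. A small
sketch using the standard SetComplexityN()/Complexity() interface (the
benchmark body is illustrative):

    #include <vector>

    #include "benchmark/benchmark.h"

    static void BM_VectorPushBack(benchmark::State& state) {
      for (auto _ : state) {
        std::vector<int> v;
        for (int i = 0; i < state.range(0); ++i) v.push_back(i);
        benchmark::DoNotOptimize(v.data());
      }
      // Tell the complexity machinery what "N" was for this run.
      state.SetComplexityN(state.range(0));
    }
    // oAuto asks MinimalLeastSq to pick the best-fitting curve; the
    // "BigO" and "RMS" rows are then produced by ComputeBigO().
    BENCHMARK(BM_VectorPushBack)
        ->Range(1 << 10, 1 << 16)
        ->Complexity(benchmark::oAuto);
    BENCHMARK_MAIN();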
diff --git a/contrib/libs/benchmark/src/console_reporter.cc b/contrib/libs/benchmark/src/console_reporter.cc
new file mode 100644
index 00000000000..04cc0b74e58
--- /dev/null
+++ b/contrib/libs/benchmark/src/console_reporter.cc
@@ -0,0 +1,190 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+bool ConsoleReporter::ReportContext(const Context& context) {
+ name_field_width_ = context.name_field_width;
+ printed_header_ = false;
+ prev_counters_.clear();
+
+ PrintBasicContext(&GetErrorStream(), context);
+
+#ifdef BENCHMARK_OS_WINDOWS
+ if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
+ GetErrorStream()
+ << "Color printing is only supported for stdout on windows."
+ " Disabling color printing\n";
+ output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
+ }
+#endif
+
+ return true;
+}
+
+void ConsoleReporter::PrintHeader(const Run& run) {
+ std::string str =
+ FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
+ "Benchmark", "Time", "CPU", "Iterations");
+ if (!run.counters.empty()) {
+ if (output_options_ & OO_Tabular) {
+ for (auto const& c : run.counters) {
+ str += FormatString(" %10s", c.first.c_str());
+ }
+ } else {
+ str += " UserCounters...";
+ }
+ }
+ std::string line = std::string(str.length(), '-');
+ GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
+}
+
+void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
+ for (const auto& run : reports) {
+ // print the header:
+ // --- if none was printed yet
+ bool print_header = !printed_header_;
+ // --- or if the format is tabular and this run
+ // has different fields from the prev header
+ print_header |= (output_options_ & OO_Tabular) &&
+ (!internal::SameNames(run.counters, prev_counters_));
+ if (print_header) {
+ printed_header_ = true;
+ prev_counters_ = run.counters;
+ PrintHeader(run);
+ }
+ // As an alternative to printing the headers like this, we could sort
+ // the benchmarks by header and then print. But this would require
+ // waiting for the full results before printing, or printing twice.
+ PrintRunData(run);
+ }
+}
+
+static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
+ ...) {
+ va_list args;
+ va_start(args, fmt);
+ out << FormatString(fmt, args);
+ va_end(args);
+}
+
+static std::string FormatTime(double time) {
+ // Align decimal places...
+ if (time < 1.0) {
+ return FormatString("%10.3f", time);
+ }
+ if (time < 10.0) {
+ return FormatString("%10.2f", time);
+ }
+ if (time < 100.0) {
+ return FormatString("%10.1f", time);
+ }
+ return FormatString("%10.0f", time);
+}
+
+void ConsoleReporter::PrintRunData(const Run& result) {
+ typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
+ auto& Out = GetOutputStream();
+ PrinterFn* printer = (output_options_ & OO_Color)
+ ? static_cast<PrinterFn*>(ColorPrintf)
+ : IgnoreColorPrint;
+ auto name_color =
+ (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
+ printer(Out, name_color, "%-*s ", name_field_width_,
+ result.benchmark_name().c_str());
+
+ if (result.error_occurred) {
+ printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
+ result.error_message.c_str());
+ printer(Out, COLOR_DEFAULT, "\n");
+ return;
+ }
+
+ const double real_time = result.GetAdjustedRealTime();
+ const double cpu_time = result.GetAdjustedCPUTime();
+ const std::string real_time_str = FormatTime(real_time);
+ const std::string cpu_time_str = FormatTime(cpu_time);
+
+ if (result.report_big_o) {
+ std::string big_o = GetBigOString(result.complexity);
+ printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time,
+ big_o.c_str(), cpu_time, big_o.c_str());
+ } else if (result.report_rms) {
+ printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
+ cpu_time * 100, "%");
+ } else if (result.run_type != Run::RT_Aggregate ||
+ result.aggregate_unit == StatisticUnit::kTime) {
+ const char* timeLabel = GetTimeUnitString(result.time_unit);
+ printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(),
+ timeLabel, cpu_time_str.c_str(), timeLabel);
+ } else {
+ assert(result.aggregate_unit == StatisticUnit::kPercentage);
+ printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ",
+ (100. * result.real_accumulated_time), "%",
+ (100. * result.cpu_accumulated_time), "%");
+ }
+
+ if (!result.report_big_o && !result.report_rms) {
+ printer(Out, COLOR_CYAN, "%10lld", result.iterations);
+ }
+
+ for (auto& c : result.counters) {
+ const std::size_t cNameLen =
+ std::max(std::string::size_type(10), c.first.length());
+ std::string s;
+ const char* unit = "";
+ if (result.run_type == Run::RT_Aggregate &&
+ result.aggregate_unit == StatisticUnit::kPercentage) {
+ s = StrFormat("%.2f", 100. * c.second.value);
+ unit = "%";
+ } else {
+ s = HumanReadableNumber(c.second.value, c.second.oneK);
+ if (c.second.flags & Counter::kIsRate)
+ unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
+ }
+ if (output_options_ & OO_Tabular) {
+ printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
+ unit);
+ } else {
+ printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
+ }
+ }
+
+ if (!result.report_label.empty()) {
+ printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
+ }
+
+ printer(Out, COLOR_DEFAULT, "\n");
+}
+
+} // end namespace benchmark
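
The tabular counter layout handled by PrintHeader() can be requested
programmatically as well as via the --benchmark_counters_tabular flag. A
sketch passing an explicitly configured reporter (the counter value is
illustrative):

    #include "benchmark/benchmark.h"

    static void BM_Counters(benchmark::State& state) {
      for (auto _ : state) {
      }
      state.counters["items"] = 64;
    }
    BENCHMARK(BM_Counters);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      // OO_ColorTabular = OO_Color | OO_Tabular: PrintHeader() emits one
      // column per counter instead of the compact "UserCounters..." form.
      benchmark::ConsoleReporter reporter(
          benchmark::ConsoleReporter::OO_ColorTabular);
      benchmark::RunSpecifiedBenchmarks(&reporter);
      return 0;
    }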
diff --git a/contrib/libs/benchmark/src/counter.cc b/contrib/libs/benchmark/src/counter.cc
new file mode 100644
index 00000000000..cf5b78ee3ac
--- /dev/null
+++ b/contrib/libs/benchmark/src/counter.cc
@@ -0,0 +1,80 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "counter.h"
+
+namespace benchmark {
+namespace internal {
+
+double Finish(Counter const& c, IterationCount iterations, double cpu_time,
+ double num_threads) {
+ double v = c.value;
+ if (c.flags & Counter::kIsRate) {
+ v /= cpu_time;
+ }
+ if (c.flags & Counter::kAvgThreads) {
+ v /= num_threads;
+ }
+ if (c.flags & Counter::kIsIterationInvariant) {
+ v *= iterations;
+ }
+ if (c.flags & Counter::kAvgIterations) {
+ v /= iterations;
+ }
+
+ if (c.flags & Counter::kInvert) { // Invert is *always* last.
+ v = 1.0 / v;
+ }
+ return v;
+}
+
+void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
+ double num_threads) {
+ for (auto& c : *l) {
+ c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
+ }
+}
+
+void Increment(UserCounters* l, UserCounters const& r) {
+ // add counters present in both or just in *l
+ for (auto& c : *l) {
+ auto it = r.find(c.first);
+ if (it != r.end()) {
+ c.second.value = c.second + it->second;
+ }
+ }
+ // add counters present in r, but not in *l
+ for (auto const& tc : r) {
+ auto it = l->find(tc.first);
+ if (it == l->end()) {
+ (*l)[tc.first] = tc.second;
+ }
+ }
+}
+
+bool SameNames(UserCounters const& l, UserCounters const& r) {
+ if (&l == &r) return true;
+ if (l.size() != r.size()) {
+ return false;
+ }
+ for (auto const& c : l) {
+ if (r.find(c.first) == r.end()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // end namespace internal
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/counter.h b/contrib/libs/benchmark/src/counter.h
new file mode 100644
index 00000000000..1f5a58e31f0
--- /dev/null
+++ b/contrib/libs/benchmark/src/counter.h
@@ -0,0 +1,32 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_COUNTER_H_
+#define BENCHMARK_COUNTER_H_
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// These counter-related functions are hidden to reduce the API surface.
+namespace internal {
+void Finish(UserCounters* l, IterationCount iterations, double time,
+ double num_threads);
+void Increment(UserCounters* l, UserCounters const& r);
+bool SameNames(UserCounters const& l, UserCounters const& r);
+} // end namespace internal
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COUNTER_H_
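
These helpers back the public benchmark::Counter type. A sketch showing how
the counter flags map onto the Finish() arithmetic in counter.cc (kIsRate
divides by cpu time, kAvgIterations by the iteration count; the byte counts
are illustrative):

    #include <cstdint>

    #include "benchmark/benchmark.h"

    static void BM_Process(benchmark::State& state) {
      int64_t bytes = 0;
      for (auto _ : state) {
        bytes += 4096;  // pretend each iteration handles a 4 KiB buffer
        benchmark::DoNotOptimize(bytes);
      }
      // Reported as bytes per second of CPU time (kIsRate), with
      // 1024-based human-readable scaling (kIs1024).
      state.counters["bytes"] = benchmark::Counter(
          static_cast<double>(bytes), benchmark::Counter::kIsRate,
          benchmark::Counter::kIs1024);
      // Reported as an average per iteration (kAvgIterations).
      state.counters["per_iter"] = benchmark::Counter(
          static_cast<double>(bytes), benchmark::Counter::kAvgIterations);
    }
    BENCHMARK(BM_Process);
    BENCHMARK_MAIN();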
diff --git a/contrib/libs/benchmark/src/csv_reporter.cc b/contrib/libs/benchmark/src/csv_reporter.cc
new file mode 100644
index 00000000000..1c5e9fa6689
--- /dev/null
+++ b/contrib/libs/benchmark/src/csv_reporter.cc
@@ -0,0 +1,158 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+#include "complexity.h"
+#include "string_util.h"
+#include "timers.h"
+
+// File format reference: http://edoceo.com/utilitas/csv-file-format.
+
+namespace benchmark {
+
+namespace {
+std::vector<std::string> elements = {
+ "name", "iterations", "real_time", "cpu_time",
+ "time_unit", "bytes_per_second", "items_per_second", "label",
+ "error_occurred", "error_message"};
+} // namespace
+
+std::string CsvEscape(const std::string& s) {
+ std::string tmp;
+ tmp.reserve(s.size() + 2);
+ for (char c : s) {
+ switch (c) {
+ case '"':
+ tmp += "\"\"";
+ break;
+ default:
+ tmp += c;
+ break;
+ }
+ }
+ return '"' + tmp + '"';
+}
+
+bool CSVReporter::ReportContext(const Context& context) {
+ PrintBasicContext(&GetErrorStream(), context);
+ return true;
+}
+
+void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
+ std::ostream& Out = GetOutputStream();
+
+ if (!printed_header_) {
+ // save the names of all the user counters
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
+ user_counter_names_.insert(cnt.first);
+ }
+ }
+
+ // print the header
+ for (auto B = elements.begin(); B != elements.end();) {
+ Out << *B++;
+ if (B != elements.end()) Out << ",";
+ }
+ for (auto B = user_counter_names_.begin();
+ B != user_counter_names_.end();) {
+ Out << ",\"" << *B++ << "\"";
+ }
+ Out << "\n";
+
+ printed_header_ = true;
+ } else {
+ // check that all the current counters are saved in the name set
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
+ BM_CHECK(user_counter_names_.find(cnt.first) !=
+ user_counter_names_.end())
+ << "All counters must be present in each run. "
+ << "Counter named \"" << cnt.first
+ << "\" was not in a run after being added to the header";
+ }
+ }
+ }
+
+ // print results for each run
+ for (const auto& run : reports) {
+ PrintRunData(run);
+ }
+}
+
+void CSVReporter::PrintRunData(const Run& run) {
+ std::ostream& Out = GetOutputStream();
+ Out << CsvEscape(run.benchmark_name()) << ",";
+ if (run.error_occurred) {
+ Out << std::string(elements.size() - 3, ',');
+ Out << "true,";
+ Out << CsvEscape(run.error_message) << "\n";
+ return;
+ }
+
+ // Do not print iteration on bigO and RMS report
+ if (!run.report_big_o && !run.report_rms) {
+ Out << run.iterations;
+ }
+ Out << ",";
+
+ Out << run.GetAdjustedRealTime() << ",";
+ Out << run.GetAdjustedCPUTime() << ",";
+
+ // Do not print timeLabel on bigO and RMS report
+ if (run.report_big_o) {
+ Out << GetBigOString(run.complexity);
+ } else if (!run.report_rms) {
+ Out << GetTimeUnitString(run.time_unit);
+ }
+ Out << ",";
+
+ if (run.counters.find("bytes_per_second") != run.counters.end()) {
+ Out << run.counters.at("bytes_per_second");
+ }
+ Out << ",";
+ if (run.counters.find("items_per_second") != run.counters.end()) {
+ Out << run.counters.at("items_per_second");
+ }
+ Out << ",";
+ if (!run.report_label.empty()) {
+ Out << CsvEscape(run.report_label);
+ }
+ Out << ",,"; // for error_occurred and error_message
+
+ // Print user counters
+ for (const auto& ucn : user_counter_names_) {
+ auto it = run.counters.find(ucn);
+ if (it == run.counters.end()) {
+ Out << ",";
+ } else {
+ Out << "," << it->second;
+ }
+ }
+ Out << '\n';
+}
+
+} // end namespace benchmark
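
A sketch of routing output through this reporter programmatically; the same
effect is normally obtained with --benchmark_format=csv:

    #include "benchmark/benchmark.h"

    static void BM_Noop(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Noop);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      // Display results through the CSV reporter defined in this file.
      benchmark::CSVReporter reporter;
      benchmark::RunSpecifiedBenchmarks(&reporter);
      return 0;
    }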
diff --git a/contrib/libs/benchmark/src/cycleclock.h b/contrib/libs/benchmark/src/cycleclock.h
new file mode 100644
index 00000000000..07dfdd2ec49
--- /dev/null
+++ b/contrib/libs/benchmark/src/cycleclock.h
@@ -0,0 +1,225 @@
+// ----------------------------------------------------------------------
+// CycleClock
+// A CycleClock tells you the current time in Cycles. The "time"
+// is actually time since power-on. This is like time() but doesn't
+// involve a system call and is much more precise.
+//
+// NOTE: Not all cpu/platform/kernel combinations guarantee that this
+// clock increments at a constant rate or is synchronized across all logical
+// cpus in a system.
+//
+// If you need the above guarantees, please consider using a different
+// API. There are efforts to provide an interface with millisecond
+// granularity, implemented as a memory read. A memory read is generally
+// cheaper than the CycleClock for many architectures.
+//
+// Also, in some out of order CPU implementations, the CycleClock is not
+// serializing. So if you're trying to count at cycles granularity, your
+// data might be inaccurate due to out of order instruction execution.
+// ----------------------------------------------------------------------
+
+#ifndef BENCHMARK_CYCLECLOCK_H_
+#define BENCHMARK_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "benchmark/benchmark.h"
+#include "internal_macros.h"
+
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_time.h>
+#endif
+// For MSVC, we want to use '_asm rdtsc' when possible (since it works
+// with even ancient MSVC compilers), and when not possible the
+// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation.
+// Therefore, we simply declare __rdtsc ourselves. See also
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64)
+extern "C" uint64_t __rdtsc();
+#pragma intrinsic(__rdtsc)
+#endif
+
+#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#error #include <emscripten.h>
+#endif
+
+namespace benchmark {
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+namespace cycleclock {
+// This should return the number of cycles since power-on. Thread-safe.
+inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
+#if defined(BENCHMARK_OS_MACOSX)
+ // this goes at the top because we need ALL Macs, regardless of
+ // architecture, to return the number of "mach time units" that
+ // have passed since startup. See sysinfo.cc where
+ // InitializeSystemInfo() sets the supposed cpu clock frequency of
+ // macs to the number of mach time units per second, not actual
+ // CPU clock frequency (which can change in the face of CPU
+ // frequency scaling). Also note that when the Mac sleeps, this
+ // counter pauses; it does not continue counting, nor does it
+ // reset to zero.
+ return mach_absolute_time();
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+  // this goes above the x86-specific code because old versions of
+  // Emscripten define __x86_64__ even though they have nothing to do with x86.
+ return static_cast<int64_t>(emscripten_get_now() * 1e+6);
+#elif defined(__i386__)
+ int64_t ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ return ret;
+#elif defined(__x86_64__) || defined(__amd64__)
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+#elif defined(__powerpc__) || defined(__ppc__)
+ // This returns a time-base, which is not always precisely a cycle-count.
+#if defined(__powerpc64__) || defined(__ppc64__)
+ int64_t tb;
+ asm volatile("mfspr %0, 268" : "=r"(tb));
+ return tb;
+#else
+ uint32_t tbl, tbu0, tbu1;
+ asm volatile(
+ "mftbu %0\n"
+ "mftb %1\n"
+ "mftbu %2"
+ : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
+ tbl &= -static_cast<int32_t>(tbu0 == tbu1);
+ // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
+ return (static_cast<uint64_t>(tbu1) << 32) | tbl;
+#endif
+#elif defined(__sparc__)
+ int64_t tick;
+ asm(".byte 0x83, 0x41, 0x00, 0x00");
+ asm("mov %%g1, %0" : "=r"(tick));
+ return tick;
+#elif defined(__ia64__)
+ int64_t itc;
+ asm("mov %0 = ar.itc" : "=r"(itc));
+ return itc;
+#elif defined(COMPILER_MSVC) && defined(_M_IX86)
+ // Older MSVC compilers (like 7.x) don't seem to support the
+ // __rdtsc intrinsic properly, so I prefer to use _asm instead
+ // when I know it will work. Otherwise, I'll use __rdtsc and hope
+ // the code is being compiled with a non-ancient compiler.
+ _asm rdtsc
+#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
+  // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
+ // and https://reviews.llvm.org/D53115
+ int64_t virtual_timer_value;
+ virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
+ return virtual_timer_value;
+#elif defined(COMPILER_MSVC)
+ return __rdtsc();
+#elif defined(BENCHMARK_OS_NACL)
+ // Native Client validator on x86/x86-64 allows RDTSC instructions,
+ // and this case is handled above. Native Client validator on ARM
+ // rejects MRC instructions (used in the ARM-specific sequence below),
+ // so we handle it here. Portable Native Client compiles to
+ // architecture-agnostic bytecode, which doesn't provide any
+ // cycle counter access mnemonics.
+
+  // Native Client does not provide any API to access the cycle counter.
+  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
+  // because it provides nanosecond resolution (which is noticeable at
+  // least for PNaCl modules running on x86 Mac & Linux).
+ // Initialize to always return 0 if clock_gettime fails.
+ struct timespec ts = {0, 0};
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
+#elif defined(__aarch64__)
+ // System timer of ARMv8 runs at a different frequency than the CPU's.
+ // The frequency is fixed, typically in the range 1-50MHz. It can be
+ // read at CNTFRQ special register. We assume the OS has set up
+ // the virtual timer properly.
+ int64_t virtual_timer_value;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+#elif defined(__ARM_ARCH)
+ // V6 is the earliest arch that has a standard cyclecount
+ // Native Client validator doesn't allow MRC instructions.
+#if (__ARM_ARCH >= 6)
+ uint32_t pmccntr;
+ uint32_t pmuseren;
+ uint32_t pmcntenset;
+ // Read the user mode perf monitor counter access permissions.
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
+ if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
+ if (pmcntenset & 0x80000000ul) { // Is it counting?
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
+ // The counter is set up to count every 64th cycle
+ return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
+ }
+ }
+#endif
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__mips__) || defined(__m68k__)
+ // mips apparently only allows rdtsc for superusers, so we fall
+ // back to gettimeofday. It's possible clock_gettime would be better.
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__loongarch__)
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__s390__) // Covers both s390 and s390x.
+ // Return the CPU clock.
+ uint64_t tsc;
+#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
+ // z/OS XL compiler HLASM syntax.
+ asm(" stck %0" : "=m"(tsc) : : "cc");
+#else
+ asm("stck %0" : "=Q"(tsc) : : "cc");
+#endif
+ return tsc;
+#elif defined(__riscv) // RISC-V
+ // Use RDCYCLE (and RDCYCLEH on riscv32)
+#if __riscv_xlen == 32
+ uint32_t cycles_lo, cycles_hi0, cycles_hi1;
+ // This asm also includes the PowerPC overflow handling strategy, as above.
+ // Implemented in assembly because Clang insisted on branching.
+ asm volatile(
+ "rdcycleh %0\n"
+ "rdcycle %1\n"
+ "rdcycleh %2\n"
+ "sub %0, %0, %2\n"
+ "seqz %0, %0\n"
+ "sub %0, zero, %0\n"
+ "and %1, %1, %0\n"
+ : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
+ return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
+#else
+ uint64_t cycles;
+ asm volatile("rdcycle %0" : "=r"(cycles));
+ return cycles;
+#endif
+#elif defined(__e2k__) || defined(__elbrus__)
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#else
+// The soft failover to a generic implementation is automatic only for ARM.
+// For other platforms the developer is expected to make an attempt to create
+// a fast implementation and use the generic version if nothing better exists.
+#error You need to define CycleTimer for your OS and CPU
+#endif
+}
+} // end namespace cycleclock
+} // end namespace benchmark
+
+#endif // BENCHMARK_CYCLECLOCK_H_
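
Although this is an internal header, a short sketch clarifies the contract:
Now() returns raw timer ticks, which are CPU cycles only on some platforms,
and the counter is not serializing, so treat deltas as approximations
(assumes compilation inside this tree):

    #include <cstdint>
    #include <cstdio>

    #include "cycleclock.h"

    int main() {
      const int64_t start = benchmark::cycleclock::Now();
      volatile double x = 0.0;
      for (int i = 0; i < 1000000; ++i) x = x + 1.0;  // work being timed
      const int64_t stop = benchmark::cycleclock::Now();
      std::printf("elapsed ticks: %lld\n",
                  static_cast<long long>(stop - start));
      return 0;
    }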
diff --git a/contrib/libs/benchmark/src/internal_macros.h b/contrib/libs/benchmark/src/internal_macros.h
new file mode 100644
index 00000000000..91f367b894b
--- /dev/null
+++ b/contrib/libs/benchmark/src/internal_macros.h
@@ -0,0 +1,102 @@
+#ifndef BENCHMARK_INTERNAL_MACROS_H_
+#define BENCHMARK_INTERNAL_MACROS_H_
+
+#include "benchmark/benchmark.h"
+
+/* Needed to detect STL */
+#include <cstdlib>
+
+// clang-format off
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if defined(__clang__)
+ #if defined(__ibmxl__)
+ #if !defined(COMPILER_IBMXL)
+ #define COMPILER_IBMXL
+ #endif
+ #elif !defined(COMPILER_CLANG)
+ #define COMPILER_CLANG
+ #endif
+#elif defined(_MSC_VER)
+ #if !defined(COMPILER_MSVC)
+ #define COMPILER_MSVC
+ #endif
+#elif defined(__GNUC__)
+ #if !defined(COMPILER_GCC)
+ #define COMPILER_GCC
+ #endif
+#endif
+
+#if __has_feature(cxx_attributes)
+ #define BENCHMARK_NORETURN [[noreturn]]
+#elif defined(__GNUC__)
+ #define BENCHMARK_NORETURN __attribute__((noreturn))
+#elif defined(COMPILER_MSVC)
+ #define BENCHMARK_NORETURN __declspec(noreturn)
+#else
+ #define BENCHMARK_NORETURN
+#endif
+
+#if defined(__CYGWIN__)
+ #define BENCHMARK_OS_CYGWIN 1
+#elif defined(_WIN32)
+ #define BENCHMARK_OS_WINDOWS 1
+ #if defined(__MINGW32__)
+ #define BENCHMARK_OS_MINGW 1
+ #endif
+#elif defined(__APPLE__)
+ #define BENCHMARK_OS_APPLE 1
+ #include "TargetConditionals.h"
+ #if defined(TARGET_OS_MAC)
+ #define BENCHMARK_OS_MACOSX 1
+ #if defined(TARGET_OS_IPHONE)
+ #define BENCHMARK_OS_IOS 1
+ #endif
+ #endif
+#elif defined(__FreeBSD__)
+ #define BENCHMARK_OS_FREEBSD 1
+#elif defined(__NetBSD__)
+ #define BENCHMARK_OS_NETBSD 1
+#elif defined(__OpenBSD__)
+ #define BENCHMARK_OS_OPENBSD 1
+#elif defined(__DragonFly__)
+ #define BENCHMARK_OS_DRAGONFLY 1
+#elif defined(__linux__)
+ #define BENCHMARK_OS_LINUX 1
+#elif defined(__native_client__)
+ #define BENCHMARK_OS_NACL 1
+#elif defined(__EMSCRIPTEN__)
+ #define BENCHMARK_OS_EMSCRIPTEN 1
+#elif defined(__rtems__)
+ #define BENCHMARK_OS_RTEMS 1
+#elif defined(__Fuchsia__)
+#define BENCHMARK_OS_FUCHSIA 1
+#elif defined (__SVR4) && defined (__sun)
+#define BENCHMARK_OS_SOLARIS 1
+#elif defined(__QNX__)
+#define BENCHMARK_OS_QNX 1
+#elif defined(__MVS__)
+#define BENCHMARK_OS_ZOS 1
+#endif
+
+#if defined(__ANDROID__) && defined(__GLIBCXX__)
+#define BENCHMARK_STL_ANDROID_GNUSTL 1
+#endif
+
+#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
+ && !defined(__EXCEPTIONS)
+ #define BENCHMARK_HAS_NO_EXCEPTIONS
+#endif
+
+#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
+ #define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
+#else
+ #define BENCHMARK_MAYBE_UNUSED
+#endif
+
+// clang-format on
+
+#endif // BENCHMARK_INTERNAL_MACROS_H_
diff --git a/contrib/libs/benchmark/src/json_reporter.cc b/contrib/libs/benchmark/src/json_reporter.cc
new file mode 100644
index 00000000000..e84a4ed24f9
--- /dev/null
+++ b/contrib/libs/benchmark/src/json_reporter.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <iomanip> // for setprecision
+#include <iostream>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+namespace internal {
+extern std::map<std::string, std::string>* global_context;
+}
+
+namespace {
+
+std::string StrEscape(const std::string& s) {
+ std::string tmp;
+ tmp.reserve(s.size());
+ for (char c : s) {
+ switch (c) {
+ case '\b':
+ tmp += "\\b";
+ break;
+ case '\f':
+ tmp += "\\f";
+ break;
+ case '\n':
+ tmp += "\\n";
+ break;
+ case '\r':
+ tmp += "\\r";
+ break;
+ case '\t':
+ tmp += "\\t";
+ break;
+ case '\\':
+ tmp += "\\\\";
+ break;
+ case '"':
+ tmp += "\\\"";
+ break;
+ default:
+ tmp += c;
+ break;
+ }
+ }
+ return tmp;
+}
+
+std::string FormatKV(std::string const& key, std::string const& value) {
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(),
+ StrEscape(value).c_str());
+}
+
+std::string FormatKV(std::string const& key, const char* value) {
+ return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(),
+ StrEscape(value).c_str());
+}
+
+std::string FormatKV(std::string const& key, bool value) {
+ return StrFormat("\"%s\": %s", StrEscape(key).c_str(),
+ value ? "true" : "false");
+}
+
+std::string FormatKV(std::string const& key, int64_t value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": " << value;
+ return ss.str();
+}
+
+std::string FormatKV(std::string const& key, IterationCount value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": " << value;
+ return ss.str();
+}
+
+std::string FormatKV(std::string const& key, double value) {
+ std::stringstream ss;
+ ss << '"' << StrEscape(key) << "\": ";
+
+ if (std::isnan(value))
+ ss << (value < 0 ? "-" : "") << "NaN";
+ else if (std::isinf(value))
+ ss << (value < 0 ? "-" : "") << "Infinity";
+ else {
+ const auto max_digits10 =
+ std::numeric_limits<decltype(value)>::max_digits10;
+ const auto max_fractional_digits10 = max_digits10 - 1;
+ ss << std::scientific << std::setprecision(max_fractional_digits10)
+ << value;
+ }
+ return ss.str();
+}
+
+int64_t RoundDouble(double v) { return std::lround(v); }
+
+} // end namespace
+
+bool JSONReporter::ReportContext(const Context& context) {
+ std::ostream& out = GetOutputStream();
+
+ out << "{\n";
+ std::string inner_indent(2, ' ');
+
+ // Open context block and print context information.
+ out << inner_indent << "\"context\": {\n";
+ std::string indent(4, ' ');
+
+ std::string walltime_value = LocalDateTimeString();
+ out << indent << FormatKV("date", walltime_value) << ",\n";
+
+ out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
+
+ if (Context::executable_name) {
+ out << indent << FormatKV("executable", Context::executable_name) << ",\n";
+ }
+
+ CPUInfo const& info = context.cpu_info;
+ out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
+ << ",\n";
+ out << indent
+ << FormatKV("mhz_per_cpu",
+ RoundDouble(info.cycles_per_second / 1000000.0))
+ << ",\n";
+ if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
+ out << indent
+ << FormatKV("cpu_scaling_enabled",
+ info.scaling == CPUInfo::Scaling::ENABLED ? true : false)
+ << ",\n";
+ }
+
+ out << indent << "\"caches\": [\n";
+ indent = std::string(6, ' ');
+ std::string cache_indent(8, ' ');
+ for (size_t i = 0; i < info.caches.size(); ++i) {
+ auto& CI = info.caches[i];
+ out << indent << "{\n";
+ out << cache_indent << FormatKV("type", CI.type) << ",\n";
+ out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
+ << ",\n";
+ out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size))
+ << ",\n";
+ out << cache_indent
+ << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
+ << "\n";
+ out << indent << "}";
+ if (i != info.caches.size() - 1) out << ",";
+ out << "\n";
+ }
+ indent = std::string(4, ' ');
+ out << indent << "],\n";
+ out << indent << "\"load_avg\": [";
+ for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
+ out << *it++;
+ if (it != info.load_avg.end()) out << ",";
+ }
+ out << "],\n";
+
+#if defined(NDEBUG)
+ const char build_type[] = "release";
+#else
+ const char build_type[] = "debug";
+#endif
+ out << indent << FormatKV("library_build_type", build_type);
+
+ if (internal::global_context != nullptr) {
+ for (const auto& kv : *internal::global_context) {
+ out << ",\n";
+ out << indent << FormatKV(kv.first, kv.second);
+ }
+ }
+ out << "\n";
+
+ // Close context block and open the list of benchmarks.
+ out << inner_indent << "},\n";
+ out << inner_indent << "\"benchmarks\": [\n";
+ return true;
+}
+
+void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
+ if (reports.empty()) {
+ return;
+ }
+ std::string indent(4, ' ');
+ std::ostream& out = GetOutputStream();
+ if (!first_report_) {
+ out << ",\n";
+ }
+ first_report_ = false;
+
+ for (auto it = reports.begin(); it != reports.end(); ++it) {
+ out << indent << "{\n";
+ PrintRunData(*it);
+ out << indent << '}';
+ auto it_cp = it;
+ if (++it_cp != reports.end()) {
+ out << ",\n";
+ }
+ }
+}
+
+void JSONReporter::Finalize() {
+ // Close the list of benchmarks and the top level object.
+ GetOutputStream() << "\n ]\n}\n";
+}
+
+void JSONReporter::PrintRunData(Run const& run) {
+ std::string indent(6, ' ');
+ std::ostream& out = GetOutputStream();
+ out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
+ out << indent << FormatKV("family_index", run.family_index) << ",\n";
+ out << indent
+ << FormatKV("per_family_instance_index", run.per_family_instance_index)
+ << ",\n";
+ out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
+ out << indent << FormatKV("run_type", [&run]() -> const char* {
+ switch (run.run_type) {
+ case BenchmarkReporter::Run::RT_Iteration:
+ return "iteration";
+ case BenchmarkReporter::Run::RT_Aggregate:
+ return "aggregate";
+ }
+ BENCHMARK_UNREACHABLE();
+ }()) << ",\n";
+ out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
+ if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("repetition_index", run.repetition_index)
+ << ",\n";
+ }
+ out << indent << FormatKV("threads", run.threads) << ",\n";
+ if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+ out << indent << FormatKV("aggregate_unit", [&run]() -> const char* {
+ switch (run.aggregate_unit) {
+ case StatisticUnit::kTime:
+ return "time";
+ case StatisticUnit::kPercentage:
+ return "percentage";
+ }
+ BENCHMARK_UNREACHABLE();
+ }()) << ",\n";
+ }
+ if (run.error_occurred) {
+ out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
+ out << indent << FormatKV("error_message", run.error_message) << ",\n";
+ }
+ if (!run.report_big_o && !run.report_rms) {
+ out << indent << FormatKV("iterations", run.iterations) << ",\n";
+ if (run.run_type != Run::RT_Aggregate ||
+ run.aggregate_unit == StatisticUnit::kTime) {
+ out << indent << FormatKV("real_time", run.GetAdjustedRealTime())
+ << ",\n";
+ out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+ } else {
+ assert(run.aggregate_unit == StatisticUnit::kPercentage);
+ out << indent << FormatKV("real_time", run.real_accumulated_time)
+ << ",\n";
+ out << indent << FormatKV("cpu_time", run.cpu_accumulated_time);
+ }
+ out << ",\n"
+ << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_big_o) {
+ out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
+ << ",\n";
+ out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
+ << ",\n";
+ out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
+ out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_rms) {
+ out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
+ }
+
+ for (auto& c : run.counters) {
+ out << ",\n" << indent << FormatKV(c.first, c.second);
+ }
+
+ if (run.memory_result) {
+ const MemoryManager::Result memory_result = *run.memory_result;
+ out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
+ out << ",\n"
+ << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used);
+
+ auto report_if_present = [&out, &indent](const char* label, int64_t val) {
+ if (val != MemoryManager::TombstoneValue)
+ out << ",\n" << indent << FormatKV(label, val);
+ };
+
+ report_if_present("total_allocated_bytes",
+ memory_result.total_allocated_bytes);
+ report_if_present("net_heap_growth", memory_result.net_heap_growth);
+ }
+
+ if (!run.report_label.empty()) {
+ out << ",\n" << indent << FormatKV("label", run.report_label);
+ }
+ out << '\n';
+}
+
+const int64_t MemoryManager::TombstoneValue =
+ std::numeric_limits<int64_t>::max();
+
+} // end namespace benchmark
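
The internal::global_context consulted in ReportContext() is populated
through the public AddCustomContext() API; each key/value pair lands in the
JSON "context" object. A sketch (the strings are illustrative):

    #include "benchmark/benchmark.h"

    static void BM_Empty(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Empty);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      benchmark::AddCustomContext("commit", "deadbeef");
      benchmark::AddCustomContext("config", "release");
      // With --benchmark_format=json these pairs appear alongside
      // date, host_name, caches, etc. in the "context" block.
      benchmark::RunSpecifiedBenchmarks();
      return 0;
    }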
diff --git a/contrib/libs/benchmark/src/log.h b/contrib/libs/benchmark/src/log.h
new file mode 100644
index 00000000000..48c071aded8
--- /dev/null
+++ b/contrib/libs/benchmark/src/log.h
@@ -0,0 +1,74 @@
+#ifndef BENCHMARK_LOG_H_
+#define BENCHMARK_LOG_H_
+
+#include <iostream>
+#include <ostream>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
+
+class LogType {
+ friend LogType& GetNullLogInstance();
+ friend LogType& GetErrorLogInstance();
+
+ // FIXME: Add locking to output.
+ template <class Tp>
+ friend LogType& operator<<(LogType&, Tp const&);
+ friend LogType& operator<<(LogType&, EndLType*);
+
+ private:
+ LogType(std::ostream* out) : out_(out) {}
+ std::ostream* out_;
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
+};
+
+template <class Tp>
+LogType& operator<<(LogType& log, Tp const& value) {
+ if (log.out_) {
+ *log.out_ << value;
+ }
+ return log;
+}
+
+inline LogType& operator<<(LogType& log, EndLType* m) {
+ if (log.out_) {
+ *log.out_ << m;
+ }
+ return log;
+}
+
+inline int& LogLevel() {
+ static int log_level = 0;
+ return log_level;
+}
+
+inline LogType& GetNullLogInstance() {
+ static LogType log(nullptr);
+ return log;
+}
+
+inline LogType& GetErrorLogInstance() {
+ static LogType log(&std::clog);
+ return log;
+}
+
+inline LogType& GetLogInstanceForLevel(int level) {
+ if (level <= LogLevel()) {
+ return GetErrorLogInstance();
+ }
+ return GetNullLogInstance();
+}
+
+} // end namespace internal
+} // end namespace benchmark
+
+// clang-format off
+#define BM_VLOG(x) \
+ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
+ " ")
+// clang-format on
+#endif
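
A sketch of the logging contract (internal header; assumes compilation
inside this tree): a message prints only when its level is at or below
LogLevel(); everything else is routed to the null stream:

    #include "log.h"

    int main() {
      benchmark::internal::LogLevel() = 2;
      BM_VLOG(1) << "visible: level 1 <= 2" << std::endl;
      BM_VLOG(3) << "suppressed: level 3 > 2" << std::endl;
      return 0;
    }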
diff --git a/contrib/libs/benchmark/src/mutex.h b/contrib/libs/benchmark/src/mutex.h
new file mode 100644
index 00000000000..bec78d9e5f9
--- /dev/null
+++ b/contrib/libs/benchmark/src/mutex.h
@@ -0,0 +1,155 @@
+#ifndef BENCHMARK_MUTEX_H_
+#define BENCHMARK_MUTEX_H_
+
+#include <condition_variable>
+#include <mutex>
+
+#include "check.h"
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely erased when compiling with other compilers.
+#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+
+namespace benchmark {
+
+typedef std::condition_variable Condition;
+
+// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
+// we can annotate them with thread safety attributes and use the
+// -Wthread-safety warning with clang. The standard library types cannot be
+// used directly because they do not provide the required annotations.
+class CAPABILITY("mutex") Mutex {
+ public:
+ Mutex() {}
+
+ void lock() ACQUIRE() { mut_.lock(); }
+ void unlock() RELEASE() { mut_.unlock(); }
+ std::mutex& native_handle() { return mut_; }
+
+ private:
+ std::mutex mut_;
+};
+
+class SCOPED_CAPABILITY MutexLock {
+ typedef std::unique_lock<std::mutex> MutexLockImp;
+
+ public:
+ MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
+ ~MutexLock() RELEASE() {}
+ MutexLockImp& native_handle() { return ml_; }
+
+ private:
+ MutexLockImp ml_;
+};
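+// Illustrative usage sketch (the Queue type is hypothetical): the annotations
+// let clang's -Wthread-safety verify that pending_ is only touched while mu_
+// is held.
+//
+//   class Queue {
+//     Mutex mu_;
+//     int pending_ GUARDED_BY(mu_) = 0;
+//
+//    public:
+//     void Add() EXCLUDES(mu_) {
+//       MutexLock lock(mu_);  // acquires mu_; released at end of scope
+//       ++pending_;
+//     }
+//   };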
+
+class Barrier {
+ public:
+ Barrier(int num_threads) : running_threads_(num_threads) {}
+
+ // Called by each thread
+ bool wait() EXCLUDES(lock_) {
+ bool last_thread = false;
+ {
+ MutexLock ml(lock_);
+ last_thread = createBarrier(ml);
+ }
+ if (last_thread) phase_condition_.notify_all();
+ return last_thread;
+ }
+
+ void removeThread() EXCLUDES(lock_) {
+ MutexLock ml(lock_);
+ --running_threads_;
+ if (entered_ != 0) phase_condition_.notify_all();
+ }
+
+ private:
+ Mutex lock_;
+ Condition phase_condition_;
+ int running_threads_;
+
+ // State for barrier management
+ int phase_number_ = 0;
+ int entered_ = 0; // Number of threads that have entered this barrier
+
+ // Enter the barrier and wait until all other threads have also
+ // entered the barrier. Returns true iff this is the last thread to
+ // enter the barrier.
+ bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
+ BM_CHECK_LT(entered_, running_threads_);
+ entered_++;
+ if (entered_ < running_threads_) {
+ // Wait for all threads to enter
+ int phase_number_cp = phase_number_;
+ auto cb = [this, phase_number_cp]() {
+ return this->phase_number_ > phase_number_cp ||
+ entered_ == running_threads_; // A thread has aborted in error
+ };
+ phase_condition_.wait(ml.native_handle(), cb);
+ if (phase_number_ > phase_number_cp) return false;
+ // else (running_threads_ == entered_) and we are the last thread.
+ }
+ // Last thread has reached the barrier
+ phase_number_++;
+ entered_ = 0;
+ return true;
+ }
+};
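+// Illustrative usage sketch (num_threads is a placeholder): each worker calls
+// wait(); the last thread to arrive sees wait() return true and can run
+// per-phase setup while the others are woken.
+//
+//   Barrier barrier(num_threads);
+//   auto worker = [&]() {
+//     if (barrier.wait()) {
+//       // last thread in; runs once per phase
+//     }
+//     // ... all threads proceed together from here ...
+//   };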
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_MUTEX_H_
diff --git a/contrib/libs/benchmark/src/perf_counters.cc b/contrib/libs/benchmark/src/perf_counters.cc
new file mode 100644
index 00000000000..5a362c1c45c
--- /dev/null
+++ b/contrib/libs/benchmark/src/perf_counters.cc
@@ -0,0 +1,132 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "perf_counters.h"
+
+#include <cstring>
+#include <vector>
+
+#if defined HAVE_LIBPFM
+#error #include "perfmon/pfmlib.h"
+#error #include "perfmon/pfmlib_perf_event.h"
+#endif
+
+namespace benchmark {
+namespace internal {
+
+constexpr size_t PerfCounterValues::kMaxCounters;
+
+#if defined HAVE_LIBPFM
+const bool PerfCounters::kSupported = true;
+
+bool PerfCounters::Initialize() { return pfm_initialize() == PFM_SUCCESS; }
+
+PerfCounters PerfCounters::Create(
+ const std::vector<std::string>& counter_names) {
+ if (counter_names.empty()) {
+ return NoCounters();
+ }
+ if (counter_names.size() > PerfCounterValues::kMaxCounters) {
+ GetErrorLogInstance()
+ << counter_names.size()
+ << " counters were requested. The minimum is 1, the maximum is "
+ << PerfCounterValues::kMaxCounters << "\n";
+ return NoCounters();
+ }
+ std::vector<int> counter_ids(counter_names.size());
+
+ const int mode = PFM_PLM3; // user mode only
+ for (size_t i = 0; i < counter_names.size(); ++i) {
+ const bool is_first = i == 0;
+ struct perf_event_attr attr {};
+ attr.size = sizeof(attr);
+ const int group_id = !is_first ? counter_ids[0] : -1;
+ const auto& name = counter_names[i];
+ if (name.empty()) {
+ GetErrorLogInstance() << "A counter name was the empty string\n";
+ return NoCounters();
+ }
+ pfm_perf_encode_arg_t arg{};
+ arg.attr = &attr;
+
+ const int pfm_get =
+ pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT, &arg);
+ if (pfm_get != PFM_SUCCESS) {
+ GetErrorLogInstance() << "Unknown counter name: " << name << "\n";
+ return NoCounters();
+ }
+ attr.disabled = is_first;
+ // Note: the man page for perf_event_open suggests inherit = true and
+ // read_format = PERF_FORMAT_GROUP don't work together, but that's not the
+ // case.
+ attr.inherit = true;
+ attr.pinned = is_first;
+ attr.exclude_kernel = true;
+ attr.exclude_user = false;
+ attr.exclude_hv = true;
+ // Read all counters in one read.
+ attr.read_format = PERF_FORMAT_GROUP;
+
+ int id = -1;
+ static constexpr size_t kNrOfSyscallRetries = 5;
+ // Retry syscall as it was interrupted often (b/64774091).
+ for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries;
+ ++num_retries) {
+ id = perf_event_open(&attr, 0, -1, group_id, 0);
+ if (id >= 0 || errno != EINTR) {
+ break;
+ }
+ }
+ if (id < 0) {
+ GetErrorLogInstance()
+ << "Failed to get a file descriptor for " << name << "\n";
+ return NoCounters();
+ }
+
+ counter_ids[i] = id;
+ }
+ if (ioctl(counter_ids[0], PERF_EVENT_IOC_ENABLE) != 0) {
+ GetErrorLogInstance() << "Failed to start counters\n";
+ return NoCounters();
+ }
+
+ return PerfCounters(counter_names, std::move(counter_ids));
+}
+
+PerfCounters::~PerfCounters() {
+ if (counter_ids_.empty()) {
+ return;
+ }
+ ioctl(counter_ids_[0], PERF_EVENT_IOC_DISABLE);
+ for (int fd : counter_ids_) {
+ close(fd);
+ }
+}
+#else // defined HAVE_LIBPFM
+const bool PerfCounters::kSupported = false;
+
+bool PerfCounters::Initialize() { return false; }
+
+PerfCounters PerfCounters::Create(
+ const std::vector<std::string>& counter_names) {
+ if (!counter_names.empty()) {
+ GetErrorLogInstance() << "Performance counters not supported.\n";
+ }
+ return NoCounters();
+}
+
+PerfCounters::~PerfCounters() = default;
+#endif // defined HAVE_LIBPFM
+} // namespace internal
+} // namespace benchmark
diff --git a/contrib/libs/benchmark/src/perf_counters.h b/contrib/libs/benchmark/src/perf_counters.h
new file mode 100644
index 00000000000..47ca1385e24
--- /dev/null
+++ b/contrib/libs/benchmark/src/perf_counters.h
@@ -0,0 +1,172 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_PERF_COUNTERS_H
+#define BENCHMARK_PERF_COUNTERS_H
+
+#include <array>
+#include <cstdint>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+#include "log.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#include <unistd.h>
+#endif
+
+namespace benchmark {
+namespace internal {
+
+// Typically, we can only read a small number of counters. There is also
+// padding preceding the counter values when reading multiple counters with
+// one syscall (which is desirable). PerfCounterValues abstracts these details.
+// The implementation ensures the storage is inlined, and allows 0-based
+// indexing into the counter values.
+// The object is used in conjunction with a PerfCounters object, by passing it
+// to Snapshot(). The values are populated such that
+// perfCounters->names()[i]'s value is obtained at position i (as given by
+// operator[]) of this object.
+class PerfCounterValues {
+ public:
+ explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) {
+ BM_CHECK_LE(nr_counters_, kMaxCounters);
+ }
+
+ uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; }
+
+ static constexpr size_t kMaxCounters = 3;
+
+ private:
+ friend class PerfCounters;
+ // Get the byte buffer in which perf counters can be captured.
+ // This is used by PerfCounters::Read
+ std::pair<char*, size_t> get_data_buffer() {
+ return {reinterpret_cast<char*>(values_.data()),
+ sizeof(uint64_t) * (kPadding + nr_counters_)};
+ }
+
+ static constexpr size_t kPadding = 1;
+ std::array<uint64_t, kPadding + kMaxCounters> values_;
+ const size_t nr_counters_;
+};
+
+// Collect PMU counters. The object, once constructed, is ready to be used by
+// calling Snapshot(). PMU counter collection is enabled from the time Create()
+// is called to obtain the object until the object's destructor runs.
+class PerfCounters final {
+ public:
+ // True iff this platform supports performance counters.
+ static const bool kSupported;
+
+ bool IsValid() const { return is_valid_; }
+ static PerfCounters NoCounters() { return PerfCounters(); }
+
+ ~PerfCounters();
+ PerfCounters(PerfCounters&&) = default;
+ PerfCounters(const PerfCounters&) = delete;
+
+ // Platform-specific implementations may choose to do some library
+ // initialization here.
+ static bool Initialize();
+
+ // Return a PerfCounters object ready to read the counters with the names
+ // specified. The values are user-mode only. The counter name format is
+ // implementation and OS specific.
+ // TODO: once we move to C++17, this should be a std::optional, and then the
+ // IsValid() boolean can be dropped.
+ static PerfCounters Create(const std::vector<std::string>& counter_names);
+
+ // Take a snapshot of the current value of the counters into the provided
+ // valid PerfCounterValues storage. The values are populated such that:
+ // names()[i]'s value is (*values)[i]
+ BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const {
+#ifndef BENCHMARK_OS_WINDOWS
+ assert(values != nullptr);
+ assert(IsValid());
+ auto buffer = values->get_data_buffer();
+ auto read_bytes = ::read(counter_ids_[0], buffer.first, buffer.second);
+ return static_cast<size_t>(read_bytes) == buffer.second;
+#else
+ (void)values;
+ return false;
+#endif
+ }
+
+ const std::vector<std::string>& names() const { return counter_names_; }
+ size_t num_counters() const { return counter_names_.size(); }
+
+ private:
+ PerfCounters(const std::vector<std::string>& counter_names,
+ std::vector<int>&& counter_ids)
+ : counter_ids_(std::move(counter_ids)),
+ counter_names_(counter_names),
+ is_valid_(true) {}
+ PerfCounters() : is_valid_(false) {}
+
+ std::vector<int> counter_ids_;
+ const std::vector<std::string> counter_names_;
+ const bool is_valid_;
+};
+
+// Typical usage of the above primitives.
+class PerfCountersMeasurement final {
+ public:
+ PerfCountersMeasurement(PerfCounters&& c)
+ : counters_(std::move(c)),
+ start_values_(counters_.IsValid() ? counters_.names().size() : 0),
+ end_values_(counters_.IsValid() ? counters_.names().size() : 0) {}
+
+ bool IsValid() const { return counters_.IsValid(); }
+
+ BENCHMARK_ALWAYS_INLINE void Start() {
+ assert(IsValid());
+ // Tell the compiler to not move instructions above/below where we take
+ // the snapshot.
+ ClobberMemory();
+ counters_.Snapshot(&start_values_);
+ ClobberMemory();
+ }
+
+ BENCHMARK_ALWAYS_INLINE std::vector<std::pair<std::string, double>>
+ StopAndGetMeasurements() {
+ assert(IsValid());
+ // Tell the compiler to not move instructions above/below where we take
+ // the snapshot.
+ ClobberMemory();
+ counters_.Snapshot(&end_values_);
+ ClobberMemory();
+
+ std::vector<std::pair<std::string, double>> ret;
+ for (size_t i = 0; i < counters_.names().size(); ++i) {
+ double measurement = static_cast<double>(end_values_[i]) -
+ static_cast<double>(start_values_[i]);
+ ret.push_back({counters_.names()[i], measurement});
+ }
+ return ret;
+ }
+
+ private:
+ PerfCounters counters_;
+ PerfCounterValues start_values_;
+ PerfCounterValues end_values_;
+};
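+// Illustrative usage sketch (the counter names are libpfm-style examples and
+// platform-dependent):
+//
+//   PerfCountersMeasurement pcm(
+//       PerfCounters::Create({"CYCLES", "INSTRUCTIONS"}));
+//   if (pcm.IsValid()) {
+//     pcm.Start();
+//     // ... region under measurement ...
+//     for (const auto& kv : pcm.StopAndGetMeasurements()) {
+//       // kv.first is the counter name, kv.second is end - start
+//     }
+//   }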
+
+BENCHMARK_UNUSED static bool perf_init_anchor = PerfCounters::Initialize();
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_PERF_COUNTERS_H
diff --git a/contrib/libs/benchmark/src/re.h b/contrib/libs/benchmark/src/re.h
new file mode 100644
index 00000000000..630046782dc
--- /dev/null
+++ b/contrib/libs/benchmark/src/re.h
@@ -0,0 +1,158 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RE_H_
+#define BENCHMARK_RE_H_
+
+#include "internal_macros.h"
+
+// clang-format off
+
+#if !defined(HAVE_STD_REGEX) && \
+ !defined(HAVE_GNU_POSIX_REGEX) && \
+ !defined(HAVE_POSIX_REGEX)
+ // No explicit regex selection; detect based on builtin hints.
+ #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
+ #define HAVE_POSIX_REGEX 1
+ #elif __cplusplus >= 199711L
+ #define HAVE_STD_REGEX 1
+ #endif
+#endif
+
+// Prefer C regex libraries when compiling w/o exceptions so that we can
+// correctly report errors.
+#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
+ defined(HAVE_STD_REGEX) && \
+ (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
+ #undef HAVE_STD_REGEX
+#endif
+
+#if defined(HAVE_STD_REGEX)
+ #include <regex>
+#elif defined(HAVE_GNU_POSIX_REGEX)
+ #include <gnuregex.h>
+#elif defined(HAVE_POSIX_REGEX)
+ #include <regex.h>
+#else
+#error No regular expression backend was found!
+#endif
+
+// clang-format on
+
+#include <string>
+
+#include "check.h"
+
+namespace benchmark {
+
+// A wrapper around the POSIX regular expression API that provides automatic
+// cleanup
+class Regex {
+ public:
+ Regex() : init_(false) {}
+
+ ~Regex();
+
+ // Compile a regular expression matcher from spec. Returns true on success.
+ //
+ // On failure (and if error is not nullptr), error is populated with a human
+ // readable error message if an error occurs.
+ bool Init(const std::string& spec, std::string* error);
+
+ // Returns whether str matches the compiled regular expression.
+ bool Match(const std::string& str);
+
+ private:
+ bool init_;
+// Underlying regular expression object
+#if defined(HAVE_STD_REGEX)
+ std::regex re_;
+#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
+ regex_t re_;
+#else
+#error No regular expression backend implementation available
+#endif
+};
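+// Illustrative usage sketch:
+//
+//   Regex re;
+//   std::string err;
+//   if (!re.Init("^BM_.*", &err)) {
+//     // err holds a human-readable message (where the backend provides one)
+//   } else if (re.Match("BM_StringCreation")) {
+//     // matched
+//   }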
+
+#if defined(HAVE_STD_REGEX)
+
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
+ ((void)error); // suppress unused warning
+#else
+ try {
+#endif
+ re_ = std::regex(spec, std::regex_constants::extended);
+ init_ = true;
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+ } catch (const std::regex_error& e) {
+ if (error) {
+ *error = e.what();
+ }
+ }
+#endif
+ return init_;
+}
+
+inline Regex::~Regex() {}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return std::regex_search(str, re_);
+}
+
+#else
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+ int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
+ if (ec != 0) {
+ if (error) {
+ size_t needed = regerror(ec, &re_, nullptr, 0);
+ char* errbuf = new char[needed];
+ regerror(ec, &re_, errbuf, needed);
+
+ // regerror returns the number of bytes necessary to null terminate
+ // the string, so we subtract one (the terminator) when assigning to error.
+ BM_CHECK_NE(needed, 0);
+ error->assign(errbuf, needed - 1);
+
+ delete[] errbuf;
+ }
+
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+inline Regex::~Regex() {
+ if (init_) {
+ regfree(&re_);
+ }
+}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
+}
+#endif
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RE_H_
diff --git a/contrib/libs/benchmark/src/reporter.cc b/contrib/libs/benchmark/src/reporter.cc
new file mode 100644
index 00000000000..1d2df17b90f
--- /dev/null
+++ b/contrib/libs/benchmark/src/reporter.cc
@@ -0,0 +1,114 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdlib>
+#include <iostream>
+#include <map>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+namespace internal {
+extern std::map<std::string, std::string> *global_context;
+}
+
+BenchmarkReporter::BenchmarkReporter()
+ : output_stream_(&std::cout), error_stream_(&std::cerr) {}
+
+BenchmarkReporter::~BenchmarkReporter() {}
+
+void BenchmarkReporter::PrintBasicContext(std::ostream *out,
+ Context const &context) {
+ BM_CHECK(out) << "cannot be null";
+ auto &Out = *out;
+
+ Out << LocalDateTimeString() << "\n";
+
+ if (context.executable_name)
+ Out << "Running " << context.executable_name << "\n";
+
+ const CPUInfo &info = context.cpu_info;
+ Out << "Run on (" << info.num_cpus << " X "
+ << (info.cycles_per_second / 1000000.0) << " MHz CPU"
+ << ((info.num_cpus > 1) ? "s" : "") << ")\n";
+ if (info.caches.size() != 0) {
+ Out << "CPU Caches:\n";
+ for (auto &CInfo : info.caches) {
+ Out << " L" << CInfo.level << " " << CInfo.type << " "
+ << (CInfo.size / 1024) << " KiB";
+ if (CInfo.num_sharing != 0)
+ Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
+ Out << "\n";
+ }
+ }
+ if (!info.load_avg.empty()) {
+ Out << "Load Average: ";
+ for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
+ Out << StrFormat("%.2f", *It++);
+ if (It != info.load_avg.end()) Out << ", ";
+ }
+ Out << "\n";
+ }
+
+ if (internal::global_context != nullptr) {
+ for (const auto &kv : *internal::global_context) {
+ Out << kv.first << ": " << kv.second << "\n";
+ }
+ }
+
+ if (CPUInfo::Scaling::ENABLED == info.scaling) {
+ Out << "***WARNING*** CPU scaling is enabled, the benchmark "
+ "real time measurements may be noisy and will incur extra "
+ "overhead.\n";
+ }
+
+#ifndef NDEBUG
+ Out << "***WARNING*** Library was built as DEBUG. Timings may be "
+ "affected.\n";
+#endif
+}
+
+// No initializer because it's already initialized to NULL.
+const char *BenchmarkReporter::Context::executable_name;
+
+BenchmarkReporter::Context::Context()
+ : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
+
+std::string BenchmarkReporter::Run::benchmark_name() const {
+ std::string name = run_name.str();
+ if (run_type == RT_Aggregate) {
+ name += "_" + aggregate_name;
+ }
+ return name;
+}
+
+double BenchmarkReporter::Run::GetAdjustedRealTime() const {
+ double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
+
+double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
+ double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/sleep.cc b/contrib/libs/benchmark/src/sleep.cc
new file mode 100644
index 00000000000..ab59000f24a
--- /dev/null
+++ b/contrib/libs/benchmark/src/sleep.cc
@@ -0,0 +1,66 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sleep.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <ctime>
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <windows.h>
+#endif
+
+#ifdef BENCHMARK_OS_ZOS
+#include <unistd.h>
+#endif
+
+namespace benchmark {
+#ifdef BENCHMARK_OS_WINDOWS
+// Windows' Sleep takes a milliseconds argument.
+void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
+void SleepForSeconds(double seconds) {
+ SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
+}
+#else // BENCHMARK_OS_WINDOWS
+void SleepForMicroseconds(int microseconds) {
+#ifdef BENCHMARK_OS_ZOS
+ // z/OS does not support nanosleep. Instead call sleep() and then usleep() to
+ // sleep for the remaining microseconds because usleep() will fail if its
+ // argument is greater than 1000000.
+ div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
+ int seconds = sleepTime.quot;
+ while (seconds != 0) seconds = sleep(seconds);
+ while (usleep(sleepTime.rem) == -1 && errno == EINTR)
+ ;
+#else
+ struct timespec sleep_time;
+ sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
+ sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
+ ; // Ignore signals and wait for the full interval to elapse.
+#endif
+}
+
+void SleepForMilliseconds(int milliseconds) {
+ SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
+}
+
+void SleepForSeconds(double seconds) {
+ SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
+}
+#endif // BENCHMARK_OS_WINDOWS
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/sleep.h b/contrib/libs/benchmark/src/sleep.h
new file mode 100644
index 00000000000..f98551afe28
--- /dev/null
+++ b/contrib/libs/benchmark/src/sleep.h
@@ -0,0 +1,15 @@
+#ifndef BENCHMARK_SLEEP_H_
+#define BENCHMARK_SLEEP_H_
+
+namespace benchmark {
+const int kNumMillisPerSecond = 1000;
+const int kNumMicrosPerMilli = 1000;
+const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
+const int kNumNanosPerMicro = 1000;
+const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
+
+void SleepForMilliseconds(int milliseconds);
+void SleepForSeconds(double seconds);
+} // end namespace benchmark
+
+#endif // BENCHMARK_SLEEP_H_
diff --git a/contrib/libs/benchmark/src/statistics.cc b/contrib/libs/benchmark/src/statistics.cc
new file mode 100644
index 00000000000..3e5ef099397
--- /dev/null
+++ b/contrib/libs/benchmark/src/statistics.cc
@@ -0,0 +1,208 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "statistics.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "check.h"
+
+namespace benchmark {
+
+auto StatisticsSum = [](const std::vector<double>& v) {
+ return std::accumulate(v.begin(), v.end(), 0.0);
+};
+
+double StatisticsMean(const std::vector<double>& v) {
+ if (v.empty()) return 0.0;
+ return StatisticsSum(v) * (1.0 / v.size());
+}
+
+double StatisticsMedian(const std::vector<double>& v) {
+ if (v.size() < 3) return StatisticsMean(v);
+ std::vector<double> copy(v);
+
+ auto center = copy.begin() + v.size() / 2;
+ std::nth_element(copy.begin(), center, copy.end());
+
+ // Did we have an odd number of samples? If yes, then center is the median.
+ // If no, then we are looking for the average between center and the value
+ // before it.
+ if (v.size() % 2 == 1) return *center;
+ auto center2 = copy.begin() + v.size() / 2 - 1;
+ std::nth_element(copy.begin(), center2, copy.end());
+ return (*center + *center2) / 2.0;
+}
+
+// Return the sum of the squares of this sample set
+auto SumSquares = [](const std::vector<double>& v) {
+ return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
+};
+
+auto Sqr = [](const double dat) { return dat * dat; };
+auto Sqrt = [](const double dat) {
+ // Avoid NaN due to imprecision in the calculations
+ if (dat < 0.0) return 0.0;
+ return std::sqrt(dat);
+};
+
+double StatisticsStdDev(const std::vector<double>& v) {
+ const auto mean = StatisticsMean(v);
+ if (v.empty()) return mean;
+
+ // Sample standard deviation is undefined for n = 1
+ if (v.size() == 1) return 0.0;
+
+ const double avg_squares = SumSquares(v) * (1.0 / v.size());
+ return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+}
+
+double StatisticsCV(const std::vector<double>& v) {
+ if (v.size() < 2) return 0.0;
+
+ const auto stddev = StatisticsStdDev(v);
+ const auto mean = StatisticsMean(v);
+
+ return stddev / mean;
+}
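+// Worked example (illustrative): for v = {1, 2, 3, 4},
+//   mean   = 10 / 4                      = 2.5
+//   median = (2 + 3) / 2                 = 2.5    (even sample count)
+//   stddev = sqrt(4/3 * (30/4 - 2.5^2))  ~= 1.29  (sample, n-1 denominator)
+//   CV     = 1.29 / 2.5                  ~= 0.52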
+
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ auto error_count =
+ std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.error_occurred; });
+
+ if (reports.size() - error_count < 2) {
+ // We don't report aggregated data for fewer than two non-errored runs.
+ return results;
+ }
+
+ // Accumulators.
+ std::vector<double> real_accumulated_time_stat;
+ std::vector<double> cpu_accumulated_time_stat;
+
+ real_accumulated_time_stat.reserve(reports.size());
+ cpu_accumulated_time_stat.reserve(reports.size());
+
+ // All repetitions should be run with the same number of iterations so we
+ // can take this information from the first benchmark.
+ const IterationCount run_iterations = reports.front().iterations;
+ // create stats for user counters
+ struct CounterStat {
+ Counter c;
+ std::vector<double> s;
+ };
+ std::map<std::string, CounterStat> counter_stats;
+ for (Run const& r : reports) {
+ for (auto const& cnt : r.counters) {
+ auto it = counter_stats.find(cnt.first);
+ if (it == counter_stats.end()) {
+ counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
+ it = counter_stats.find(cnt.first);
+ it->second.s.reserve(reports.size());
+ } else {
+ BM_CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+ }
+ }
+ }
+
+ // Populate the accumulators.
+ for (Run const& run : reports) {
+ BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
+ BM_CHECK_EQ(run_iterations, run.iterations);
+ if (run.error_occurred) continue;
+ real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
+ cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
+ // user counters
+ for (auto const& cnt : run.counters) {
+ auto it = counter_stats.find(cnt.first);
+ BM_CHECK_NE(it, counter_stats.end());
+ it->second.s.emplace_back(cnt.second);
+ }
+ }
+
+ // Only add label if it is same for all runs
+ std::string report_label = reports[0].report_label;
+ for (std::size_t i = 1; i < reports.size(); i++) {
+ if (reports[i].report_label != report_label) {
+ report_label = "";
+ break;
+ }
+ }
+
+ const double iteration_rescale_factor =
+ double(reports.size()) / double(run_iterations);
+
+ for (const auto& Stat : *reports[0].statistics) {
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run data;
+ data.run_name = reports[0].run_name;
+ data.family_index = reports[0].family_index;
+ data.per_family_instance_index = reports[0].per_family_instance_index;
+ data.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ data.threads = reports[0].threads;
+ data.repetitions = reports[0].repetitions;
+ data.repetition_index = Run::no_repetition_index;
+ data.aggregate_name = Stat.name_;
+ data.aggregate_unit = Stat.unit_;
+ data.report_label = report_label;
+
+ // It is incorrect to say that an aggregate is computed over
+ // run's iterations, because those iterations already got averaged.
+ // Similarly, if there are N repetitions with 1 iteration each,
+ // an aggregate will be computed over N measurements, not 1.
+ // Thus it is best to simply use the count of separate reports.
+ data.iterations = reports.size();
+
+ data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
+ data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
+
+ if (data.aggregate_unit == StatisticUnit::kTime) {
+ // We will divide these times by data.iterations when reporting, but the
+ // data.iterations is not necessarily the scale of these measurements,
+ // because in each repetition, these timers are summed over all the iters.
+ // And if we want to say that the stats are over N repetitions and not
+ // M iterations, we need to multiply these by (N/M).
+ data.real_accumulated_time *= iteration_rescale_factor;
+ data.cpu_accumulated_time *= iteration_rescale_factor;
+ }
+
+ data.time_unit = reports[0].time_unit;
+
+ // user counters
+ for (auto const& kv : counter_stats) {
+ // Do *NOT* rescale the custom counters. They are already properly scaled.
+ const auto uc_stat = Stat.compute_(kv.second.s);
+ auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+ counter_stats[kv.first].c.oneK);
+ data.counters[kv.first] = c;
+ }
+
+ results.push_back(data);
+ }
+
+ return results;
+}
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/statistics.h b/contrib/libs/benchmark/src/statistics.h
new file mode 100644
index 00000000000..a9545a58c64
--- /dev/null
+++ b/contrib/libs/benchmark/src/statistics.h
@@ -0,0 +1,38 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef STATISTICS_H_
+#define STATISTICS_H_
+
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the mean, median and standard deviation
+// (and any user-specified statistics) for the specified list of reports. If
+// 'reports' contains fewer than two non-errored runs, an empty vector is
+// returned.
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+double StatisticsMean(const std::vector<double>& v);
+double StatisticsMedian(const std::vector<double>& v);
+double StatisticsStdDev(const std::vector<double>& v);
+double StatisticsCV(const std::vector<double>& v);
+
+} // end namespace benchmark
+
+#endif // STATISTICS_H_
diff --git a/contrib/libs/benchmark/src/string_util.cc b/contrib/libs/benchmark/src/string_util.cc
new file mode 100644
index 00000000000..401fa13df7a
--- /dev/null
+++ b/contrib/libs/benchmark/src/string_util.cc
@@ -0,0 +1,265 @@
+#include "string_util.h"
+
+#include <array>
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+#include <cerrno>
+#endif
+#include <cmath>
+#include <cstdarg>
+#include <cstdio>
+#include <memory>
+#include <sstream>
+
+#include "arraysize.h"
+
+namespace benchmark {
+namespace {
+
+// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
+const char kBigSIUnits[] = "kMGTPEZY";
+// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
+const char kBigIECUnits[] = "KMGTPEZY";
+// milli, micro, nano, pico, femto, atto, zepto, yocto.
+const char kSmallSIUnits[] = "munpfazy";
+
+// We require that all three arrays have the same size.
+static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
+ "SI and IEC unit arrays must be the same size");
+static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
+ "Small SI and Big SI unit arrays must be the same size");
+
+static const int64_t kUnitsSize = arraysize(kBigSIUnits);
+
+void ToExponentAndMantissa(double val, double thresh, int precision,
+ double one_k, std::string* mantissa,
+ int64_t* exponent) {
+ std::stringstream mantissa_stream;
+
+ if (val < 0) {
+ mantissa_stream << "-";
+ val = -val;
+ }
+
+ // Adjust threshold so that it never excludes things which can't be rendered
+ // in 'precision' digits.
+ const double adjusted_threshold =
+ std::max(thresh, 1.0 / std::pow(10.0, precision));
+ const double big_threshold = adjusted_threshold * one_k;
+ const double small_threshold = adjusted_threshold;
+ // Values in (simple_threshold, small_threshold) will be printed as-is.
+ const double simple_threshold = 0.01;
+
+ if (val > big_threshold) {
+ // Positive powers
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
+ scaled /= one_k;
+ if (scaled <= big_threshold) {
+ mantissa_stream << scaled;
+ *exponent = i + 1;
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else if (val < small_threshold) {
+ // Negative powers
+ if (val < simple_threshold) {
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
+ scaled *= one_k;
+ if (scaled >= small_threshold) {
+ mantissa_stream << scaled;
+ *exponent = -static_cast<int64_t>(i + 1);
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else {
+ mantissa_stream << val;
+ *exponent = 0;
+ }
+ *mantissa = mantissa_stream.str();
+}
+
+std::string ExponentToPrefix(int64_t exponent, bool iec) {
+ if (exponent == 0) return "";
+
+ const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
+ if (index >= kUnitsSize) return "";
+
+ const char* array =
+ (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
+ if (iec)
+ return array[index] + std::string("i");
+ else
+ return std::string(1, array[index]);
+}
+
+std::string ToBinaryStringFullySpecified(double value, double threshold,
+ int precision, double one_k = 1024.0) {
+ std::string mantissa;
+ int64_t exponent;
+ ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
+ &exponent);
+ return mantissa + ExponentToPrefix(exponent, false);
+}
+
+} // end namespace
+
+void AppendHumanReadable(int n, std::string* str) {
+ std::stringstream ss;
+ // Round down to the nearest SI prefix.
+ ss << ToBinaryStringFullySpecified(n, 1.0, 0);
+ *str += ss.str();
+}
+
+std::string HumanReadableNumber(double n, double one_k) {
+ // 1.1 means that figures up to 1.1k should be shown with the next unit down;
+ // this softens edge effects.
+ // 1 means that we should show one decimal place of precision.
+ return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
+}
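+// Worked example (illustrative): with the default one_k = 1024,
+// HumanReadableNumber(1536) yields "1.5k" (1536 / 1024 = 1.5 plus the SI
+// prefix); values roughly within [0.01, 1.1 * one_k] are printed unscaled.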
+
+std::string StrFormatImp(const char* msg, va_list args) {
+ // we might need a second shot at this, so pre-emptively make a copy
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+ // TODO(ericwf): use std::array for the first attempt to avoid one memory
+ // allocation; guess what the size might be
+ std::array<char, 256> local_buff;
+ std::size_t size = local_buff.size();
+ // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
+ // in the android-ndk
+ auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+
+ va_end(args_cp);
+
+ // handle empty expansion
+ if (ret == 0) return std::string{};
+ if (static_cast<std::size_t>(ret) < size)
+ return std::string(local_buff.data());
+
+ // we did not provide a long enough buffer on our first attempt.
+ // add 1 to size to account for null-byte in size cast to prevent overflow
+ size = static_cast<std::size_t>(ret) + 1;
+ auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
+ // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
+ // in the android-ndk
+ vsnprintf(buff_ptr.get(), size, msg, args);
+ return std::string(buff_ptr.get());
+}
+
+std::string StrFormat(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ std::string tmp = StrFormatImp(format, args);
+ va_end(args);
+ return tmp;
+}
+
+std::vector<std::string> StrSplit(const std::string& str, char delim) {
+ if (str.empty()) return {};
+ std::vector<std::string> ret;
+ size_t first = 0;
+ size_t next = str.find(delim);
+ for (; next != std::string::npos;
+ first = next + 1, next = str.find(delim, first)) {
+ ret.push_back(str.substr(first, next - first));
+ }
+ ret.push_back(str.substr(first));
+ return ret;
+}
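+// Illustrative example: StrSplit("a,b,,c", ',') returns {"a", "b", "", "c"}
+// (empty fields are preserved), and StrSplit("", ',') returns an empty vector.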
+
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const unsigned long result = strtoul(strStart, &strEnd, base);
+
+ const int strtoulErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtoulErrno == ERANGE) {
+ throw std::out_of_range("stoul failed: " + str +
+ " is outside of range of unsigned long");
+ } else if (strEnd == strStart || strtoulErrno != 0) {
+ throw std::invalid_argument("stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
+}
+
+int stoi(const std::string& str, size_t* pos, int base) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const long result = strtol(strStart, &strEnd, base);
+
+ const int strtolErrno = errno;
+ /* Restore previous errno */
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtolErrno == ERANGE || long(int(result)) != result) {
+ throw std::out_of_range("stoul failed: " + str +
+ " is outside of range of int");
+ } else if (strEnd == strStart || strtolErrno != 0) {
+ throw std::invalid_argument("stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return int(result);
+}
+
+double stod(const std::string& str, size_t* pos) {
+ /* Record previous errno */
+ const int oldErrno = errno;
+ errno = 0;
+
+ const char* strStart = str.c_str();
+ char* strEnd = const_cast<char*>(strStart);
+ const double result = strtod(strStart, &strEnd);
+
+ /* Restore previous errno */
+ const int strtodErrno = errno;
+ errno = oldErrno;
+
+ /* Check for errors and return */
+ if (strtodErrno == ERANGE) {
+ throw std::out_of_range("stoul failed: " + str +
+ " is outside of range of int");
+ } else if (strEnd == strStart || strtodErrno != 0) {
+ throw std::invalid_argument("stoul failed: " + str + " is not an integer");
+ }
+ if (pos != nullptr) {
+ *pos = static_cast<size_t>(strEnd - strStart);
+ }
+ return result;
+}
+#endif
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/string_util.h b/contrib/libs/benchmark/src/string_util.h
new file mode 100644
index 00000000000..ff3b7da47d6
--- /dev/null
+++ b/contrib/libs/benchmark/src/string_util.h
@@ -0,0 +1,65 @@
+#ifndef BENCHMARK_STRING_UTIL_H_
+#define BENCHMARK_STRING_UTIL_H_
+
+#include <sstream>
+#include <string>
+#include <utility>
+
+#include "internal_macros.h"
+
+namespace benchmark {
+
+void AppendHumanReadable(int n, std::string* str);
+
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
+
+#if defined(__MINGW32__)
+__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
+#elif defined(__GNUC__)
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
+
+inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+ return out;
+}
+
+template <class First, class... Rest>
+inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
+ out << std::forward<First>(f);
+ return StrCatImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+inline std::string StrCat(Args&&... args) {
+ std::ostringstream ss;
+ StrCatImp(ss, std::forward<Args>(args)...);
+ return ss.str();
+}
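+// Illustrative example: StrCat("cpu", 0, "/index", 1) yields "cpu0/index1";
+// any argument with an operator<< overload can participate.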
+
+std::vector<std::string> StrSplit(const std::string& str, char delim);
+
+// Disable lint checking for this block since it re-implements C functions.
+// NOLINTBEGIN
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+/*
+ * GNU STL in Android NDK lacks support for some C++11 functions, including
+ * stoul, stoi, stod. We reimplement them here using C functions strtoul,
+ * strtol, strtod. Note that reimplemented functions are in benchmark::
+ * namespace, not std:: namespace.
+ */
+unsigned long stoul(const std::string& str, size_t* pos = nullptr,
+ int base = 10);
+int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
+double stod(const std::string& str, size_t* pos = nullptr);
+#else
+using std::stod; // NOLINT(misc-unused-using-decls)
+using std::stoi; // NOLINT(misc-unused-using-decls)
+using std::stoul; // NOLINT(misc-unused-using-decls)
+#endif
+// NOLINTEND
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_STRING_UTIL_H_
diff --git a/contrib/libs/benchmark/src/sysinfo.cc b/contrib/libs/benchmark/src/sysinfo.cc
new file mode 100644
index 00000000000..1f359894073
--- /dev/null
+++ b/contrib/libs/benchmark/src/sysinfo.cc
@@ -0,0 +1,725 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <shlwapi.h>
+#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
+#include <versionhelpers.h>
+#include <windows.h>
+
+#include <codecvt>
+#else
+#include <fcntl.h>
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
+ defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \
+ defined BENCHMARK_OS_DRAGONFLY
+#define BENCHMARK_HAS_SYSCTL
+#include <sys/sysctl.h>
+#endif
+#endif
+#if defined(BENCHMARK_OS_SOLARIS)
+#error #include <kstat.h>
+#endif
+#if defined(BENCHMARK_OS_QNX)
+#include <sys/syspage.h>
+#endif
+
+#include <algorithm>
+#include <array>
+#include <bitset>
+#include <cerrno>
+#include <climits>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <limits>
+#include <locale>
+#include <memory>
+#include <sstream>
+#include <utility>
+
+#include "check.h"
+#include "cycleclock.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+namespace {
+
+void PrintImp(std::ostream& out) { out << std::endl; }
+
+template <class First, class... Rest>
+void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
+ out << std::forward<First>(f);
+ PrintImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
+ PrintImp(std::cerr, std::forward<Args>(args)...);
+ std::exit(EXIT_FAILURE);
+}
+
+#ifdef BENCHMARK_HAS_SYSCTL
+
+/// ValueUnion - A type used to correctly alias the byte-for-byte output of
+/// `sysctl` with the result type it's to be interpreted as.
+struct ValueUnion {
+ union DataT {
+ uint32_t uint32_value;
+ uint64_t uint64_value;
+ // For correct aliasing of union members from bytes.
+ char bytes[8];
+ };
+ using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
+
+ // The size of the data union member + its trailing array size.
+ size_t Size;
+ DataPtr Buff;
+
+ public:
+ ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
+
+ explicit ValueUnion(size_t BuffSize)
+ : Size(sizeof(DataT) + BuffSize),
+ Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
+
+ ValueUnion(ValueUnion&& other) = default;
+
+ explicit operator bool() const { return bool(Buff); }
+
+ char* data() const { return Buff->bytes; }
+
+ std::string GetAsString() const { return std::string(data()); }
+
+ int64_t GetAsInteger() const {
+ if (Size == sizeof(Buff->uint32_value))
+ return static_cast<int32_t>(Buff->uint32_value);
+ else if (Size == sizeof(Buff->uint64_value))
+ return static_cast<int64_t>(Buff->uint64_value);
+ BENCHMARK_UNREACHABLE();
+ }
+
+ uint64_t GetAsUnsigned() const {
+ if (Size == sizeof(Buff->uint32_value))
+ return Buff->uint32_value;
+ else if (Size == sizeof(Buff->uint64_value))
+ return Buff->uint64_value;
+ BENCHMARK_UNREACHABLE();
+ }
+
+ template <class T, int N>
+ std::array<T, N> GetAsArray() {
+ const int ArrSize = sizeof(T) * N;
+ BM_CHECK_LE(ArrSize, Size);
+ std::array<T, N> Arr;
+ std::memcpy(Arr.data(), data(), ArrSize);
+ return Arr;
+ }
+};
+
+ValueUnion GetSysctlImp(std::string const& Name) {
+#if defined BENCHMARK_OS_OPENBSD
+ int mib[2];
+
+ mib[0] = CTL_HW;
+ if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")) {
+ ValueUnion buff(sizeof(int));
+
+ if (Name == "hw.ncpu") {
+ mib[1] = HW_NCPU;
+ } else {
+ mib[1] = HW_CPUSPEED;
+ }
+
+ if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
+ return ValueUnion();
+ }
+ return buff;
+ }
+ return ValueUnion();
+#else
+ size_t CurBuffSize = 0;
+ if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
+ return ValueUnion();
+
+ ValueUnion buff(CurBuffSize);
+ if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
+ return buff;
+ return ValueUnion();
+#endif
+}
+
+BENCHMARK_MAYBE_UNUSED
+bool GetSysctl(std::string const& Name, std::string* Out) {
+ Out->clear();
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ Out->assign(Buff.data());
+ return true;
+}
+
+template <class Tp,
+ class = typename std::enable_if<std::is_integral<Tp>::value>::type>
+bool GetSysctl(std::string const& Name, Tp* Out) {
+ *Out = 0;
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ *Out = static_cast<Tp>(Buff.GetAsUnsigned());
+ return true;
+}
+
+template <class Tp, size_t N>
+bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
+ auto Buff = GetSysctlImp(Name);
+ if (!Buff) return false;
+ *Out = Buff.GetAsArray<Tp, N>();
+ return true;
+}
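+// Illustrative example: int NumCPU = 0; GetSysctl("hw.ncpu", &NumCPU) reads an
+// integer-valued sysctl; the array overload is used for e.g. "hw.cacheconfig".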
+#endif
+
+template <class ArgT>
+bool ReadFromFile(std::string const& fname, ArgT* arg) {
+ *arg = ArgT();
+ std::ifstream f(fname.c_str());
+ if (!f.is_open()) return false;
+ f >> *arg;
+ return f.good();
+}
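+// Illustrative example: long freq = 0;
+// ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq)
+// leaves the kHz value in freq on success (this path is used further below).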
+
+CPUInfo::Scaling CpuScaling(int num_cpus) {
+ // We don't have a valid CPU count, so don't even bother.
+ if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
+#if defined(BENCHMARK_OS_QNX)
+ return CPUInfo::Scaling::UNKNOWN;
+#elif !defined(BENCHMARK_OS_WINDOWS)
+ // On Linux, the CPUfreq subsystem exposes CPU information as files on the
+ // local file system. If reading the exported files fails, then we may not be
+ // running on Linux, so we silently ignore all the read errors.
+ std::string res;
+ for (int cpu = 0; cpu < num_cpus; ++cpu) {
+ std::string governor_file =
+ StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
+ if (ReadFromFile(governor_file, &res) && res != "performance")
+ return CPUInfo::Scaling::ENABLED;
+ }
+ return CPUInfo::Scaling::DISABLED;
+#else
+ return CPUInfo::Scaling::UNKNOWN;
+#endif
+}
+
+int CountSetBitsInCPUMap(std::string Val) {
+ auto CountBits = [](std::string Part) {
+ using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
+ Part = "0x" + Part;
+ CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
+ return static_cast<int>(Mask.count());
+ };
+ size_t Pos;
+ int total = 0;
+ while ((Pos = Val.find(',')) != std::string::npos) {
+ total += CountBits(Val.substr(0, Pos));
+ Val = Val.substr(Pos + 1);
+ }
+ if (!Val.empty()) {
+ total += CountBits(Val);
+ }
+ return total;
+}
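+// Illustrative example: a shared_cpu_map value of "ff,3" is parsed as 0xff
+// (8 bits) plus 0x3 (2 bits), so CountSetBitsInCPUMap("ff,3") returns 10.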
+
+BENCHMARK_MAYBE_UNUSED
+std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
+ std::vector<CPUInfo::CacheInfo> res;
+ std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
+ int Idx = 0;
+ while (true) {
+ CPUInfo::CacheInfo info;
+ std::string FPath = StrCat(dir, "index", Idx++, "/");
+ std::ifstream f(StrCat(FPath, "size").c_str());
+ if (!f.is_open()) break;
+ std::string suffix;
+ f >> info.size;
+ if (f.fail())
+ PrintErrorAndDie("Failed while reading file '", FPath, "size'");
+ if (f.good()) {
+ f >> suffix;
+ if (f.bad())
+ PrintErrorAndDie(
+ "Invalid cache size format: failed to read size suffix");
+ else if (f && suffix != "K")
+ PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
+ else if (suffix == "K")
+ info.size *= 1024;
+ }
+ if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
+ PrintErrorAndDie("Failed to read from file ", FPath, "type");
+ if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
+ PrintErrorAndDie("Failed to read from file ", FPath, "level");
+ std::string map_str;
+ if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
+ PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
+ info.num_sharing = CountSetBitsInCPUMap(map_str);
+ res.push_back(info);
+ }
+
+ return res;
+}
+
+#ifdef BENCHMARK_OS_MACOSX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
+ std::vector<CPUInfo::CacheInfo> res;
+ std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
+ GetSysctl("hw.cacheconfig", &CacheCounts);
+
+ struct {
+ std::string name;
+ std::string type;
+ int level;
+ uint64_t num_sharing;
+ } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
+ {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
+ {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
+ {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
+ for (auto& C : Cases) {
+ int val;
+ if (!GetSysctl(C.name, &val)) continue;
+ CPUInfo::CacheInfo info;
+ info.type = C.type;
+ info.level = C.level;
+ info.size = val;
+ info.num_sharing = static_cast<int>(C.num_sharing);
+ res.push_back(std::move(info));
+ }
+ return res;
+}
+#elif defined(BENCHMARK_OS_WINDOWS)
+std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
+ std::vector<CPUInfo::CacheInfo> res;
+ DWORD buffer_size = 0;
+ using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+ using CInfo = CACHE_DESCRIPTOR;
+
+ using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
+ GetLogicalProcessorInformation(nullptr, &buffer_size);
+ UPtr buff((PInfo*)malloc(buffer_size), &std::free);
+ if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
+ PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
+ GetLastError());
+
+ PInfo* it = buff.get();
+ PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
+
+ for (; it != end; ++it) {
+ if (it->Relationship != RelationCache) continue;
+ using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
+ BitSet B(it->ProcessorMask);
+ // To prevent duplicates, only consider caches where CPU 0 is specified
+ if (!B.test(0)) continue;
+ CInfo* Cache = &it->Cache;
+ CPUInfo::CacheInfo C;
+ C.num_sharing = static_cast<int>(B.count());
+ C.level = Cache->Level;
+ C.size = Cache->Size;
+ C.type = "Unknown";
+ switch (Cache->Type) {
+ case CacheUnified:
+ C.type = "Unified";
+ break;
+ case CacheInstruction:
+ C.type = "Instruction";
+ break;
+ case CacheData:
+ C.type = "Data";
+ break;
+ case CacheTrace:
+ C.type = "Trace";
+ break;
+ }
+ res.push_back(C);
+ }
+ return res;
+}
+#elif BENCHMARK_OS_QNX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
+ std::vector<CPUInfo::CacheInfo> res;
+ struct cacheattr_entry* cache = SYSPAGE_ENTRY(cacheattr);
+ uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
+ int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize;
+ for (int i = 0; i < num; ++i) {
+ CPUInfo::CacheInfo info;
+ switch (cache->flags) {
+ case CACHE_FLAG_INSTR:
+ info.type = "Instruction";
+ info.level = 1;
+ break;
+ case CACHE_FLAG_DATA:
+ info.type = "Data";
+ info.level = 1;
+ break;
+ case CACHE_FLAG_UNIFIED:
+ info.type = "Unified";
+ info.level = 2;
+ break;
+ case CACHE_FLAG_SHARED:
+ info.type = "Shared";
+ info.level = 3;
+ break;
+ default:
+ continue;
+ break;
+ }
+ info.size = cache->line_size * cache->num_lines;
+ info.num_sharing = 0;
+ res.push_back(std::move(info));
+ cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
+ }
+ return res;
+}
+#endif
+
+std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
+#ifdef BENCHMARK_OS_MACOSX
+ return GetCacheSizesMacOSX();
+#elif defined(BENCHMARK_OS_WINDOWS)
+ return GetCacheSizesWindows();
+#elif defined(BENCHMARK_OS_QNX)
+ return GetCacheSizesQNX();
+#else
+ return GetCacheSizesFromKVFS();
+#endif
+}
+
+std::string GetSystemName() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ std::string str;
+ const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
+ TCHAR hostname[COUNT] = {'\0'};
+ DWORD DWCOUNT = COUNT;
+ if (!GetComputerName(hostname, &DWCOUNT)) return std::string("");
+#ifndef UNICODE
+ str = std::string(hostname, DWCOUNT);
+#else
+ // Using wstring_convert, which is deprecated in C++17.
+ using convert_type = std::codecvt_utf8<wchar_t>;
+ std::wstring_convert<convert_type, wchar_t> converter;
+ std::wstring wStr(hostname, DWCOUNT);
+ str = converter.to_bytes(wStr);
+#endif
+ return str;
+#else // defined(BENCHMARK_OS_WINDOWS)
+#ifndef HOST_NAME_MAX
+#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined
+#define HOST_NAME_MAX 64
+#elif defined(BENCHMARK_OS_NACL)
+#define HOST_NAME_MAX 64
+#elif defined(BENCHMARK_OS_QNX)
+#define HOST_NAME_MAX 154
+#elif defined(BENCHMARK_OS_RTEMS)
+#define HOST_NAME_MAX 256
+#else
+#pragma message("HOST_NAME_MAX not defined. using 64")
+#define HOST_NAME_MAX 64
+#endif
+#endif // def HOST_NAME_MAX
+ char hostname[HOST_NAME_MAX];
+ int retVal = gethostname(hostname, HOST_NAME_MAX);
+ if (retVal != 0) return std::string("");
+ return std::string(hostname);
+#endif // Catch-all POSIX block.
+}
+
+int GetNumCPUs() {
+#ifdef BENCHMARK_HAS_SYSCTL
+ int NumCPU = -1;
+ if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
+ fprintf(stderr, "Err: %s\n", strerror(errno));
+ std::exit(EXIT_FAILURE);
+#elif defined(BENCHMARK_OS_WINDOWS)
+ SYSTEM_INFO sysinfo;
+ // Use memset as opposed to = {} to avoid GCC missing initializer false
+ // positives.
+ std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
+ GetSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors; // number of logical
+ // processors in the current
+ // group
+#elif defined(BENCHMARK_OS_SOLARIS)
+ // Returns -1 in case of a failure.
+ int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
+ if (NumCPU < 0) {
+ fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
+ strerror(errno));
+ }
+ return NumCPU;
+#elif defined(BENCHMARK_OS_QNX)
+ return static_cast<int>(_syspage_ptr->num_cpu);
+#else
+ int NumCPUs = 0;
+ int MaxID = -1;
+ std::ifstream f("/proc/cpuinfo");
+ if (!f.is_open()) {
+ std::cerr << "failed to open /proc/cpuinfo\n";
+ return -1;
+ }
+ const std::string Key = "processor";
+ std::string ln;
+ while (std::getline(f, ln)) {
+ if (ln.empty()) continue;
+ size_t SplitIdx = ln.find(':');
+ std::string value;
+#if defined(__s390__)
+ // s390 uses a different format in /proc/cpuinfo, so it has to be
+ // parsed differently.
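+ // E.g. a typical x86 line is "processor : 0", whereas s390 packs the
+ // CPU id into the key itself, roughly "processor 0: version = ..."
+ // (format shown here for illustration), so the id sits between the
+ // key and the first ':'.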
+ if (SplitIdx != std::string::npos)
+ value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
+#else
+ if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+#endif
+ if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
+ NumCPUs++;
+ if (!value.empty()) {
+ int CurID = benchmark::stoi(value);
+ MaxID = std::max(CurID, MaxID);
+ }
+ }
+ }
+ if (f.bad()) {
+ std::cerr << "Failure reading /proc/cpuinfo\n";
+ return -1;
+ }
+ if (!f.eof()) {
+ std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+ return -1;
+ }
+ f.close();
+
+ if ((MaxID + 1) != NumCPUs) {
+ fprintf(stderr,
+ "CPU ID assignments in /proc/cpuinfo seem messed up."
+ " This is usually caused by a bad BIOS.\n");
+ }
+ return NumCPUs;
+#endif
+ BENCHMARK_UNREACHABLE();
+}
+
+double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
+ // Currently, scaling is only used on the Linux path here; suppress
+ // diagnostics about it being unused on the other paths.
+ (void)scaling;
+
+#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
+ long freq;
+
+ // If the kernel is exporting the tsc frequency use that. There are issues
+ // where cpuinfo_max_freq cannot be relied on because the BIOS may be
+ // exporting an invalid p-state (on x86) or p-states may be used to put the
+ // processor in a new mode (turbo mode). Essentially, those frequencies
+ // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
+ // well.
+ if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
+ // If CPU scaling is disabled, use the *current* frequency.
+ // Note that we specifically don't want to read cpuinfo_cur_freq,
+ // because it is only readable by root.
+ || (scaling == CPUInfo::Scaling::DISABLED &&
+ ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq",
+ &freq))
+ // Otherwise, if CPU scaling may be in effect, we want to use
+ // the *maximum* frequency, not whatever CPU speed some random processor
+ // happens to be using now.
+ || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &freq)) {
+ // The value is in kHz (as the file name suggests). For example, on a
+ // 2GHz workstation, the file contains the value "2000000".
+ return freq * 1000.0;
+ }
+
+ const double error_value = -1;
+ double bogo_clock = error_value;
+
+ std::ifstream f("/proc/cpuinfo");
+ if (!f.is_open()) {
+ std::cerr << "failed to open /proc/cpuinfo\n";
+ return error_value;
+ }
+
+ auto startsWithKey = [](std::string const& Value, std::string const& Key) {
+ if (Key.size() > Value.size()) return false;
+ auto Cmp = [&](char X, char Y) {
+ return std::tolower(X) == std::tolower(Y);
+ };
+ return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
+ };
+
+ std::string ln;
+ while (std::getline(f, ln)) {
+ if (ln.empty()) continue;
+ size_t SplitIdx = ln.find(':');
+ std::string value;
+ if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+ // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
+ // accept positive values. Some environments (virtual machines) report zero,
+ // which would cause infinite looping in WallTime_Init.
+ if (startsWithKey(ln, "cpu MHz")) {
+ if (!value.empty()) {
+ double cycles_per_second = benchmark::stod(value) * 1000000.0;
+ if (cycles_per_second > 0) return cycles_per_second;
+ }
+ } else if (startsWithKey(ln, "bogomips")) {
+ if (!value.empty()) {
+ bogo_clock = benchmark::stod(value) * 1000000.0;
+ if (bogo_clock < 0.0) bogo_clock = error_value;
+ }
+ }
+ }
+ if (f.bad()) {
+ std::cerr << "Failure reading /proc/cpuinfo\n";
+ return error_value;
+ }
+ if (!f.eof()) {
+ std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+ return error_value;
+ }
+ f.close();
+ // If we found the bogomips clock, but nothing better, we'll use it (but
+ // we're not happy about it); otherwise, fall back to the rough estimation
+ // below.
+ if (bogo_clock >= 0.0) return bogo_clock;
+
+#elif defined BENCHMARK_HAS_SYSCTL
+ constexpr auto* FreqStr =
+#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
+ "machdep.tsc_freq";
+#elif defined BENCHMARK_OS_OPENBSD
+ "hw.cpuspeed";
+#elif defined BENCHMARK_OS_DRAGONFLY
+ "hw.tsc_frequency";
+#else
+ "hw.cpufrequency";
+#endif
+ unsigned long long hz = 0;
+#if defined BENCHMARK_OS_OPENBSD
+ if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
+#else
+ if (GetSysctl(FreqStr, &hz)) return hz;
+#endif
+ fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
+ FreqStr, strerror(errno));
+
+#elif defined BENCHMARK_OS_WINDOWS
+ // In NT, read MHz from the registry. If we fail to do so, or we're on
+ // win9x, then make a crude estimate.
+ DWORD data, data_size = sizeof(data);
+ if (IsWindowsXPOrGreater() &&
+ SUCCEEDED(
+ SHGetValueA(HKEY_LOCAL_MACHINE,
+ "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
+ "~MHz", nullptr, &data, &data_size)))
+ return static_cast<double>((int64_t)data *
+ (int64_t)(1000 * 1000)); // registry value is in MHz
+#elif defined(BENCHMARK_OS_SOLARIS)
+ kstat_ctl_t* kc = kstat_open();
+ if (!kc) {
+ std::cerr << "failed to open /dev/kstat\n";
+ return -1;
+ }
+ kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
+ if (!ksp) {
+ std::cerr << "failed to lookup in /dev/kstat\n";
+ return -1;
+ }
+ if (kstat_read(kc, ksp, NULL) < 0) {
+ std::cerr << "failed to read from /dev/kstat\n";
+ return -1;
+ }
+ kstat_named_t* knp =
+ (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
+ if (!knp) {
+ std::cerr << "failed to lookup data in /dev/kstat\n";
+ return -1;
+ }
+ if (knp->data_type != KSTAT_DATA_UINT64) {
+ std::cerr << "current_clock_Hz is of unexpected data type: "
+ << knp->data_type << "\n";
+ return -1;
+ }
+ double clock_hz = knp->value.ui64;
+ kstat_close(kc);
+ return clock_hz;
+#elif defined(BENCHMARK_OS_QNX)
+ return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
+ (int64_t)(1000 * 1000));
+#endif
+ // If we've fallen through, attempt to roughly estimate the CPU clock rate.
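+ // Since the sleep lasts roughly one second, the number of elapsed ticks
+ // approximates ticks per second (e.g. ~3.0e9 ticks implies ~3 GHz).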
+ const int estimate_time_ms = 1000;
+ const auto start_ticks = cycleclock::Now();
+ SleepForMilliseconds(estimate_time_ms);
+ return static_cast<double>(cycleclock::Now() - start_ticks);
+}
+
+std::vector<double> GetLoadAvg() {
+#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
+ !defined(__ANDROID__)
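+ // getloadavg() fills in up to three samples: the 1-, 5- and 15-minute
+ // load averages, in that order.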
+ constexpr int kMaxSamples = 3;
+ std::vector<double> res(kMaxSamples, 0.0);
+ const int nelem = getloadavg(res.data(), kMaxSamples);
+ if (nelem < 1) {
+ res.clear();
+ } else {
+ res.resize(nelem);
+ }
+ return res;
+#else
+ return {};
+#endif
+}
+
+} // end namespace
+
+const CPUInfo& CPUInfo::Get() {
+ static const CPUInfo* info = new CPUInfo();
+ return *info;
+}
+
+CPUInfo::CPUInfo()
+ : num_cpus(GetNumCPUs()),
+ scaling(CpuScaling(num_cpus)),
+ cycles_per_second(GetCPUCyclesPerSecond(scaling)),
+ caches(GetCacheSizes()),
+ load_avg(GetLoadAvg()) {}
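+
+// Illustrative (not part of the upstream sources): reporters read these
+// values through the lazily-constructed singleton, e.g.
+//
+//   const benchmark::CPUInfo& info = benchmark::CPUInfo::Get();
+//   // info.num_cpus, info.cycles_per_second, info.caches, info.load_avg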
+
+const SystemInfo& SystemInfo::Get() {
+ static const SystemInfo* info = new SystemInfo();
+ return *info;
+}
+
+SystemInfo::SystemInfo() : name(GetSystemName()) {}
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/thread_manager.h b/contrib/libs/benchmark/src/thread_manager.h
new file mode 100644
index 00000000000..46802850894
--- /dev/null
+++ b/contrib/libs/benchmark/src/thread_manager.h
@@ -0,0 +1,63 @@
+#ifndef BENCHMARK_THREAD_MANAGER_H
+#define BENCHMARK_THREAD_MANAGER_H
+
+#include <atomic>
+
+#include "benchmark/benchmark.h"
+#include "mutex.h"
+
+namespace benchmark {
+namespace internal {
+
+class ThreadManager {
+ public:
+ explicit ThreadManager(int num_threads)
+ : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
+
+ Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
+ return benchmark_mutex_;
+ }
+
+ bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
+ return start_stop_barrier_.wait();
+ }
+
+ void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
+ start_stop_barrier_.removeThread();
+ if (--alive_threads_ == 0) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.notify_all();
+ }
+ }
+
+ void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.wait(lock.native_handle(),
+ [this]() { return alive_threads_ == 0; });
+ }
+
+ struct Result {
+ IterationCount iterations = 0;
+ double real_time_used = 0;
+ double cpu_time_used = 0;
+ double manual_time_used = 0;
+ int64_t complexity_n = 0;
+ std::string report_label_;
+ std::string error_message_;
+ bool has_error_ = false;
+ UserCounters counters;
+ };
+ GUARDED_BY(GetBenchmarkMutex()) Result results;
+
+ private:
+ mutable Mutex benchmark_mutex_;
+ std::atomic<int> alive_threads_;
+ Barrier start_stop_barrier_;
+ Mutex end_cond_mutex_;
+ Condition end_condition_;
+};
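+
+// Illustrative sketch of the intended protocol (not part of the upstream
+// sources); the runner in benchmark_runner.cc is the authoritative user:
+//
+//   auto manager = std::make_shared<ThreadManager>(num_threads);
+//   // In each worker thread:
+//   manager->StartStopBarrier();  // wait until every thread is ready
+//   /* ... run the timed benchmark loop ... */
+//   manager->StartStopBarrier();  // wait until every thread is done
+//   {
+//     MutexLock l(manager->GetBenchmarkMutex());
+//     manager->results.iterations += iters;  // merge per-thread results
+//   }
+//   manager->NotifyThreadComplete();
+//   // In the main thread:
+//   manager->WaitForAllThreads();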
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_THREAD_MANAGER_H
diff --git a/contrib/libs/benchmark/src/thread_timer.h b/contrib/libs/benchmark/src/thread_timer.h
new file mode 100644
index 00000000000..eb23f595615
--- /dev/null
+++ b/contrib/libs/benchmark/src/thread_timer.h
@@ -0,0 +1,86 @@
+#ifndef BENCHMARK_THREAD_TIMER_H
+#define BENCHMARK_THREAD_TIMER_H
+
+#include "check.h"
+#include "timers.h"
+
+namespace benchmark {
+namespace internal {
+
+class ThreadTimer {
+ explicit ThreadTimer(bool measure_process_cpu_time_)
+ : measure_process_cpu_time(measure_process_cpu_time_) {}
+
+ public:
+ static ThreadTimer Create() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/false);
+ }
+ static ThreadTimer CreateProcessCpuTime() {
+ return ThreadTimer(/*measure_process_cpu_time_=*/true);
+ }
+
+ // Called by each thread
+ void StartTimer() {
+ running_ = true;
+ start_real_time_ = ChronoClockNow();
+ start_cpu_time_ = ReadCpuTimerOfChoice();
+ }
+
+ // Called by each thread
+ void StopTimer() {
+ BM_CHECK(running_);
+ running_ = false;
+ real_time_used_ += ChronoClockNow() - start_real_time_;
+ // Floating point error can result in the subtraction producing a negative
+ // time. Guard against that.
+ cpu_time_used_ +=
+ std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
+ }
+
+ // Called by each thread
+ void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
+
+ bool running() const { return running_; }
+
+ // REQUIRES: timer is not running
+ double real_time_used() const {
+ BM_CHECK(!running_);
+ return real_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double cpu_time_used() const {
+ BM_CHECK(!running_);
+ return cpu_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double manual_time_used() const {
+ BM_CHECK(!running_);
+ return manual_time_used_;
+ }
+
+ private:
+ double ReadCpuTimerOfChoice() const {
+ if (measure_process_cpu_time) return ProcessCPUUsage();
+ return ThreadCPUUsage();
+ }
+
+ // should the thread, or the process, time be measured?
+ const bool measure_process_cpu_time;
+
+ bool running_ = false; // Is the timer running
+ double start_real_time_ = 0; // If running_
+ double start_cpu_time_ = 0; // If running_
+
+ // Accumulated time so far (does not contain current slice if running_)
+ double real_time_used_ = 0;
+ double cpu_time_used_ = 0;
+ // Manually set iteration time. User sets this with SetIterationTime(seconds).
+ double manual_time_used_ = 0;
+};
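+
+// Illustrative sketch (not part of the upstream sources):
+//
+//   ThreadTimer timer = ThreadTimer::Create();
+//   timer.StartTimer();
+//   /* ... timed region ... */
+//   timer.StopTimer();
+//   double cpu_s = timer.cpu_time_used();  // valid only while stopped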
+
+} // namespace internal
+} // namespace benchmark
+
+#endif // BENCHMARK_THREAD_TIMER_H
diff --git a/contrib/libs/benchmark/src/timers.cc b/contrib/libs/benchmark/src/timers.cc
new file mode 100644
index 00000000000..9a21cb1436b
--- /dev/null
+++ b/contrib/libs/benchmark/src/timers.cc
@@ -0,0 +1,259 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "timers.h"
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <shlwapi.h>
+#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
+#include <versionhelpers.h>
+#include <windows.h>
+#else
+#include <fcntl.h>
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid a compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \
+ defined BENCHMARK_OS_MACOSX
+#include <sys/sysctl.h>
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/thread_act.h>
+#endif
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#error #include <emscripten.h>
+#endif
+
+#include <cerrno>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <iostream>
+#include <limits>
+#include <mutex>
+
+#include "check.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+
+// Suppress unused warnings on helper functions.
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+
+namespace {
+#if defined(BENCHMARK_OS_WINDOWS)
+double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
+ ULARGE_INTEGER kernel;
+ ULARGE_INTEGER user;
+ kernel.HighPart = kernel_time.dwHighDateTime;
+ kernel.LowPart = kernel_time.dwLowDateTime;
+ user.HighPart = user_time.dwHighDateTime;
+ user.LowPart = user_time.dwLowDateTime;
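+ // FILETIME ticks are 100-nanosecond intervals, hence the 1e-7 factor
+ // to convert the combined kernel+user tick count to seconds.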
+ return (static_cast<double>(kernel.QuadPart) +
+ static_cast<double>(user.QuadPart)) *
+ 1e-7;
+}
+#elif !defined(BENCHMARK_OS_FUCHSIA)
+double MakeTime(struct rusage const& ru) {
+ return (static_cast<double>(ru.ru_utime.tv_sec) +
+ static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
+ static_cast<double>(ru.ru_stime.tv_sec) +
+ static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
+}
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+double MakeTime(thread_basic_info_data_t const& info) {
+ return (static_cast<double>(info.user_time.seconds) +
+ static_cast<double>(info.user_time.microseconds) * 1e-6 +
+ static_cast<double>(info.system_time.seconds) +
+ static_cast<double>(info.system_time.microseconds) * 1e-6);
+}
+#endif
+#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
+double MakeTime(struct timespec const& ts) {
+ return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
+}
+#endif
+
+BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
+ std::cerr << "ERROR: " << msg << std::endl;
+ std::exit(EXIT_FAILURE);
+}
+
+} // end namespace
+
+double ProcessCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE proc = GetCurrentProcess();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
+ &user_time))
+ return MakeTime(kernel_time, user_time);
+ DiagnoseAndExit("GetProccessTimes() failed");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
+ // Use the Emscripten-specific API instead. The reported CPU time will be
+ // exactly the same as the total time, but this is ok because there are no
+ // long-latency synchronous system calls in Emscripten.
+ return emscripten_get_now() * 1e-3;
+#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
+ // FIXME We want to use clock_gettime, but it's not available on MacOS 10.11.
+ // See https://github.com/google/benchmark/pull/292
+ struct timespec spec;
+ if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
+ return MakeTime(spec);
+ DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
+#else
+ struct rusage ru;
+ if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
+ DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
+#endif
+}
+
+double ThreadCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE this_thread = GetCurrentThread();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
+ &user_time);
+ return MakeTime(kernel_time, user_time);
+#elif defined(BENCHMARK_OS_MACOSX)
+ // FIXME We want to use clock_gettime, but it's not available on MacOS 10.11.
+ // See https://github.com/google/benchmark/pull/292
+ mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+ thread_basic_info_data_t info;
+ mach_port_t thread = pthread_mach_thread_np(pthread_self());
+ if (thread_info(thread, THREAD_BASIC_INFO,
+ reinterpret_cast<thread_info_t>(&info),
+ &count) == KERN_SUCCESS) {
+ return MakeTime(info);
+ }
+ DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // Emscripten doesn't support traditional threads
+ return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_RTEMS)
+ // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
+ // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
+ return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_SOLARIS)
+ struct rusage ru;
+ if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
+ DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
+#elif defined(CLOCK_THREAD_CPUTIME_ID)
+ struct timespec ts;
+ if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
+ DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
+#else
+#error Per-thread timing is not available on your system.
+#endif
+}
+
+std::string LocalDateTimeString() {
+ // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM.
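+ // e.g. "2022-07-07T14:41:37+03:00".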
+ typedef std::chrono::system_clock Clock;
+ std::time_t now = Clock::to_time_t(Clock::now());
+ const std::size_t kTzOffsetLen = 6;
+ const std::size_t kTimestampLen = 19;
+
+ std::size_t tz_len;
+ std::size_t timestamp_len;
+ long int offset_minutes;
+ char tz_offset_sign = '+';
+ // tz_offset is set in one of three ways:
+ // * strftime with %z - This either returns empty or the ISO 8601 offset.
+ //   The maximum length such a string can be is 7 (e.g. "-03:30", plus the
+ //   trailing zero).
+ // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up
+ //   to 19 for each %02li, one for ':', plus the trailing zero).
+ // * A fixed string of "-00:00". The maximum length is 7 ("-00:00", plus the
+ //   trailing zero).
+ //
+ // Thus, the maximum size this needs to be is 41.
+ char tz_offset[41];
+ // Long enough buffer to avoid format-overflow warnings
+ char storage[128];
+
+#if defined(BENCHMARK_OS_WINDOWS)
+ std::tm* timeinfo_p = ::localtime(&now);
+#else
+ std::tm timeinfo;
+ std::tm* timeinfo_p = &timeinfo;
+ ::localtime_r(&now, &timeinfo);
+#endif
+
+ tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p);
+
+ if (tz_len < kTzOffsetLen && tz_len > 1) {
+ // Timezone offset was written. strftime writes offset as +HHMM or -HHMM,
+ // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse
+ // the offset as an integer, then reprint it to a string.
+
+ offset_minutes = ::strtol(tz_offset, NULL, 10);
+ if (offset_minutes < 0) {
+ offset_minutes *= -1;
+ tz_offset_sign = '-';
+ }
+
+ tz_len =
+ ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
+ tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
+ BM_CHECK(tz_len == kTzOffsetLen);
+ ((void)tz_len); // Prevent unused variable warning in optimized build.
+ } else {
+ // Unknown offset. RFC3339 specifies that unknown local offsets should be
+ // written as UTC time with -00:00 timezone.
+#if defined(BENCHMARK_OS_WINDOWS)
+ // Potential race condition if another thread calls localtime or gmtime.
+ timeinfo_p = ::gmtime(&now);
+#else
+ ::gmtime_r(&now, &timeinfo);
+#endif
+
+ strncpy(tz_offset, "-00:00", kTzOffsetLen + 1);
+ }
+
+ timestamp_len =
+ std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", timeinfo_p);
+ BM_CHECK(timestamp_len == kTimestampLen);
+ // Prevent unused variable warning in optimized build.
+ ((void)kTimestampLen);
+
+ std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1);
+ return std::string(storage);
+}
+
+} // end namespace benchmark
diff --git a/contrib/libs/benchmark/src/timers.h b/contrib/libs/benchmark/src/timers.h
new file mode 100644
index 00000000000..65606ccd93d
--- /dev/null
+++ b/contrib/libs/benchmark/src/timers.h
@@ -0,0 +1,48 @@
+#ifndef BENCHMARK_TIMERS_H
+#define BENCHMARK_TIMERS_H
+
+#include <chrono>
+#include <string>
+
+namespace benchmark {
+
+// Return the CPU usage of the current process
+double ProcessCPUUsage();
+
+// Return the CPU usage of the children of the current process
+double ChildrenCPUUsage();
+
+// Return the CPU usage of the current thread
+double ThreadCPUUsage();
+
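+// Prefer std::chrono::high_resolution_clock when it is steady; otherwise
+// fall back to std::chrono::steady_clock.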
+#if defined(HAVE_STEADY_CLOCK)
+template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
+struct ChooseSteadyClock {
+ typedef std::chrono::high_resolution_clock type;
+};
+
+template <>
+struct ChooseSteadyClock<false> {
+ typedef std::chrono::steady_clock type;
+};
+#endif
+
+struct ChooseClockType {
+#if defined(HAVE_STEADY_CLOCK)
+ typedef ChooseSteadyClock<>::type type;
+#else
+ typedef std::chrono::high_resolution_clock type;
+#endif
+};
+
+inline double ChronoClockNow() {
+ typedef ChooseClockType::type ClockType;
+ using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
+ return FpSeconds(ClockType::now().time_since_epoch()).count();
+}
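+
+// Illustrative (not part of the upstream sources): elapsed wall time can
+// be measured as
+//
+//   double start = benchmark::ChronoClockNow();
+//   /* ... work ... */
+//   double elapsed_s = benchmark::ChronoClockNow() - start;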
+
+std::string LocalDateTimeString();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_TIMERS_H