author    anastasy888 <anastasy888@yandex-team.ru>    2022-02-10 16:45:55 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:45:55 +0300
commit    3a7a498715ef1b66f5054455421b845e45e3a653 (patch)
tree      1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/restricted/abseil-cpp/absl/base/internal
parent    49f765d71da452ea93138a25559dfa68dd76c7f3 (diff)
Restoring authorship annotation for <anastasy888@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/base/internal')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook.h | 332
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h | 64
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc | 210
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.h | 180
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h | 312
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/endian.h | 528
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/exception_safety_testing.h | 2184
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/exception_testing.h | 84
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/hide_ptr.h | 94
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/identity.h | 66
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h | 214
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h | 88
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/invoke.h | 362
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc | 1220
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.h | 248
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc/ya.make | 60
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h | 202
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/per_thread_tls.h | 104
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/pretty_function.h | 66
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc | 404
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h | 342
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/raw_logging/ya.make | 54
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/scheduling_mode.h | 108
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.cc | 158
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.h | 82
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env/ya.make | 56
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc | 378
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h | 420
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc | 66
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc | 122
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc | 82
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.cc | 158
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h | 160
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait/ya.make | 46
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc | 64
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc | 754
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.h | 116
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/thread_annotations.h | 542
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc | 254
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h | 386
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc | 200
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.h | 142
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate/ya.make | 56
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/tsan_mutex_interface.h | 130
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h | 158
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc | 194
-rw-r--r--  contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h | 230
47 files changed, 6240 insertions(+), 6240 deletions(-)
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook.h b/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook.h
index a0176c3787..ae21cd7fe5 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook.h
@@ -1,47 +1,47 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
-#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <utility>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(_MSC_VER) && !defined(__clang__)
-#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
-#else
-#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
-#endif
-
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
#if defined(_MSC_VER)
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
#else
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
#endif
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-template <typename T>
-class AtomicHook;
-
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
// To workaround AtomicHook not being constant-initializable on some platforms,
// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
// instead of `ABSL_CONST_INIT`.
@@ -51,150 +51,150 @@ class AtomicHook;
#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
#endif
-// `AtomicHook` is a helper class, templatized on a raw function pointer type,
-// for implementing Abseil customization hooks. It is a callable object that
-// dispatches to the registered hook. Objects of type `AtomicHook` must have
-// static or thread storage duration.
-//
-// A default constructed object performs a no-op (and returns a default
-// constructed object) if no hook has been registered.
-//
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks. It is a callable object that
+// dispatches to the registered hook. Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
// Hooks can be pre-registered via constant initialization, for example:
//
// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
// my_hook(DefaultAction);
//
-// and then changed at runtime via a call to `Store()`.
-//
-// Reads and writes guarantee memory_order_acquire/memory_order_release
-// semantics.
-template <typename ReturnType, typename... Args>
-class AtomicHook<ReturnType (*)(Args...)> {
- public:
- using FnPtr = ReturnType (*)(Args...);
-
- // Constructs an object that by default performs a no-op (and
- // returns a default constructed object) when no hook has been registered.
- constexpr AtomicHook() : AtomicHook(DummyFunction) {}
-
- // Constructs an object that by default dispatches to/returns the
- // pre-registered default_fn when no hook has been registered at runtime.
-#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
- explicit constexpr AtomicHook(FnPtr default_fn)
- : hook_(default_fn), default_fn_(default_fn) {}
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+ using FnPtr = ReturnType (*)(Args...);
+
+ // Constructs an object that by default performs a no-op (and
+ // returns a default constructed object) when no hook has been registered.
+ constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+ // Constructs an object that by default dispatches to/returns the
+ // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+ explicit constexpr AtomicHook(FnPtr default_fn)
+ : hook_(default_fn), default_fn_(default_fn) {}
#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(kUninitialized), default_fn_(default_fn) {}
-#else
+#else
// As of January 2020, on all known versions of MSVC this constructor runs in
// the global constructor sequence. If `Store()` is called by a dynamic
// initializer, we want to preserve the value, even if this constructor runs
// after the call to `Store()`. If not, `hook_` will be
// zero-initialized by the linker and we have no need to set it.
- // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
- explicit constexpr AtomicHook(FnPtr default_fn)
- : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
- static_assert(kUninitialized == 0, "here we rely on zero-initialization");
- }
-#endif
-
- // Stores the provided function pointer as the value for this hook.
- //
- // This is intended to be called once. Multiple calls are legal only if the
- // same function pointer is provided for each call. The store is implemented
- // as a memory_order_release operation, and read accesses are implemented as
- // memory_order_acquire.
- void Store(FnPtr fn) {
- bool success = DoStore(fn);
- static_cast<void>(success);
- assert(success);
- }
-
- // Invokes the registered callback. If no callback has yet been registered, a
- // default-constructed object of the appropriate type is returned instead.
- template <typename... CallArgs>
- ReturnType operator()(CallArgs&&... args) const {
- return DoLoad()(std::forward<CallArgs>(args)...);
- }
-
- // Returns the registered callback, or nullptr if none has been registered.
- // Useful if client code needs to conditionalize behavior based on whether a
- // callback was registered.
- //
- // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
- // operator()() will perform a no-op if no callback was registered, while
- // Load()() will dereference a null function pointer. Prefer operator()() to
- // Load()() unless you must conditionalize behavior on whether a hook was
- // registered.
- FnPtr Load() const {
- FnPtr ptr = DoLoad();
- return (ptr == DummyFunction) ? nullptr : ptr;
- }
-
- private:
- static ReturnType DummyFunction(Args...) {
- return ReturnType();
- }
-
- // Current versions of MSVC (as of September 2017) have a broken
- // implementation of std::atomic<T*>: Its constructor attempts to do the
- // equivalent of a reinterpret_cast in a constexpr context, which is not
- // allowed.
- //
- // This causes an issue when building with LLVM under Windows. To avoid this,
- // we use a less-efficient, intptr_t-based implementation on Windows.
-#if ABSL_HAVE_WORKING_ATOMIC_POINTER
- // Return the stored value, or DummyFunction if no value has been stored.
- FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
-
- // Store the given value. Returns false if a different value was already
- // stored to this object.
- bool DoStore(FnPtr fn) {
- assert(fn);
- FnPtr expected = default_fn_;
- const bool store_succeeded = hook_.compare_exchange_strong(
- expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
- const bool same_value_already_stored = (expected == fn);
- return store_succeeded || same_value_already_stored;
- }
-
- std::atomic<FnPtr> hook_;
-#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER
- // Use a sentinel value unlikely to be the address of an actual function.
- static constexpr intptr_t kUninitialized = 0;
-
- static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
- "intptr_t can't contain a function pointer");
-
- FnPtr DoLoad() const {
- const intptr_t value = hook_.load(std::memory_order_acquire);
- if (value == kUninitialized) {
- return default_fn_;
- }
- return reinterpret_cast<FnPtr>(value);
- }
-
- bool DoStore(FnPtr fn) {
- assert(fn);
- const auto value = reinterpret_cast<intptr_t>(fn);
- intptr_t expected = kUninitialized;
- const bool store_succeeded = hook_.compare_exchange_strong(
- expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
- const bool same_value_already_stored = (expected == value);
- return store_succeeded || same_value_already_stored;
- }
-
- std::atomic<intptr_t> hook_;
-#endif
-
- const FnPtr default_fn_;
-};
-
-#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
-#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
-
-} // namespace base_internal
+ // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+ explicit constexpr AtomicHook(FnPtr default_fn)
+ : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+ static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+ }
+#endif
+
+ // Stores the provided function pointer as the value for this hook.
+ //
+ // This is intended to be called once. Multiple calls are legal only if the
+ // same function pointer is provided for each call. The store is implemented
+ // as a memory_order_release operation, and read accesses are implemented as
+ // memory_order_acquire.
+ void Store(FnPtr fn) {
+ bool success = DoStore(fn);
+ static_cast<void>(success);
+ assert(success);
+ }
+
+ // Invokes the registered callback. If no callback has yet been registered, a
+ // default-constructed object of the appropriate type is returned instead.
+ template <typename... CallArgs>
+ ReturnType operator()(CallArgs&&... args) const {
+ return DoLoad()(std::forward<CallArgs>(args)...);
+ }
+
+ // Returns the registered callback, or nullptr if none has been registered.
+ // Useful if client code needs to conditionalize behavior based on whether a
+ // callback was registered.
+ //
+ // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+ // operator()() will perform a no-op if no callback was registered, while
+ // Load()() will dereference a null function pointer. Prefer operator()() to
+ // Load()() unless you must conditionalize behavior on whether a hook was
+ // registered.
+ FnPtr Load() const {
+ FnPtr ptr = DoLoad();
+ return (ptr == DummyFunction) ? nullptr : ptr;
+ }
+
+ private:
+ static ReturnType DummyFunction(Args...) {
+ return ReturnType();
+ }
+
+ // Current versions of MSVC (as of September 2017) have a broken
+ // implementation of std::atomic<T*>: Its constructor attempts to do the
+ // equivalent of a reinterpret_cast in a constexpr context, which is not
+ // allowed.
+ //
+ // This causes an issue when building with LLVM under Windows. To avoid this,
+ // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+ // Return the stored value, or DummyFunction if no value has been stored.
+ FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+ // Store the given value. Returns false if a different value was already
+ // stored to this object.
+ bool DoStore(FnPtr fn) {
+ assert(fn);
+ FnPtr expected = default_fn_;
+ const bool store_succeeded = hook_.compare_exchange_strong(
+ expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+ const bool same_value_already_stored = (expected == fn);
+ return store_succeeded || same_value_already_stored;
+ }
+
+ std::atomic<FnPtr> hook_;
+#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+ // Use a sentinel value unlikely to be the address of an actual function.
+ static constexpr intptr_t kUninitialized = 0;
+
+ static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+ "intptr_t can't contain a function pointer");
+
+ FnPtr DoLoad() const {
+ const intptr_t value = hook_.load(std::memory_order_acquire);
+ if (value == kUninitialized) {
+ return default_fn_;
+ }
+ return reinterpret_cast<FnPtr>(value);
+ }
+
+ bool DoStore(FnPtr fn) {
+ assert(fn);
+ const auto value = reinterpret_cast<intptr_t>(fn);
+ intptr_t expected = kUninitialized;
+ const bool store_succeeded = hook_.compare_exchange_strong(
+ expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+ const bool same_value_already_stored = (expected == value);
+ return store_succeeded || same_value_already_stored;
+ }
+
+ std::atomic<intptr_t> hook_;
+#endif
+
+ const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
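Usage note (not part of the patch): the hook above is typically constant-initialized at namespace scope, registered once via Store(), and invoked through operator(). A minimal sketch, assuming a hypothetical hook name and logger function:

#include "absl/base/internal/atomic_hook.h"

namespace {
// Default-constructed hook: operator() is a no-op until Store() is called.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static
    absl::base_internal::AtomicHook<void (*)(int)> log_hook;

void CustomLogger(int /*code*/) { /* user-provided behavior */ }
}  // namespace

void Example() {
  log_hook(1);                   // No hook registered yet: no-op.
  log_hook.Store(CustomLogger);  // Register once; repeats must pass the same fn.
  log_hook(2);                   // Now dispatches to CustomLogger.
  if (auto fn = log_hook.Load()) fn(3);  // Load() is non-null once registered.
}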
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h b/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
index 5c16867a0f..3e72b4977d 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h
@@ -1,34 +1,34 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
-#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
-
-#include "absl/base/internal/atomic_hook.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace atomic_hook_internal {
-
-using VoidF = void (*)();
-extern absl::base_internal::AtomicHook<VoidF> func;
-extern int default_func_calls;
-void DefaultFunc();
-void RegisterFunc(VoidF func);
-
-} // namespace atomic_hook_internal
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+} // namespace atomic_hook_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+} // namespace absl
+
+#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
index ce15b63b63..0e65005b89 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.cc
@@ -1,107 +1,107 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The implementation of CycleClock::Frequency.
-//
-// NOTE: only i386 and x86_64 have been well tested.
-// PPC, sparc, alpha, and ia64 are based on
-// http://peter.kuscsik.com/wordpress/?p=14
-// with modifications by m3b. See also
-// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
-
-#include "absl/base/internal/cycleclock.h"
-
-#include <atomic>
-#include <chrono> // NOLINT(build/c++11)
-
-#include "absl/base/internal/unscaledcycleclock.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <atomic>
+#include <chrono> // NOLINT(build/c++11)
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-#if ABSL_USE_UNSCALED_CYCLECLOCK
-
-namespace {
-
-#ifdef NDEBUG
-#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-// Not debug mode and the UnscaledCycleClock frequency is the CPU
-// frequency. Scale the CycleClock to prevent overflow if someone
-// tries to represent the time as cycles since the Unix epoch.
-static constexpr int32_t kShift = 1;
-#else
-// Not debug mode and the UnscaledCycleClock isn't operating at the
-// raw CPU frequency. There is no need to do any scaling, so don't
-// needlessly sacrifice precision.
-static constexpr int32_t kShift = 0;
-#endif
-#else
-// In debug mode use a different shift to discourage depending on a
-// particular shift value.
-static constexpr int32_t kShift = 2;
-#endif
-
-static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
-static std::atomic<CycleClockSourceFunc> cycle_clock_source;
-
-CycleClockSourceFunc LoadCycleClockSource() {
- // Optimize for the common case (no callback) by first doing a relaxed load;
- // this is significantly faster on non-x86 platforms.
- if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
- return nullptr;
- }
- // This corresponds to the store(std::memory_order_release) in
- // CycleClockSource::Register, and makes sure that any updates made prior to
- // registering the callback are visible to this thread before the callback is
- // invoked.
- return cycle_clock_source.load(std::memory_order_acquire);
-}
-
-} // namespace
-
-int64_t CycleClock::Now() {
- auto fn = LoadCycleClockSource();
- if (fn == nullptr) {
- return base_internal::UnscaledCycleClock::Now() >> kShift;
- }
- return fn() >> kShift;
-}
-
-double CycleClock::Frequency() {
- return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
-}
-
-void CycleClockSource::Register(CycleClockSourceFunc source) {
- // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
- cycle_clock_source.store(source, std::memory_order_release);
-}
-
-#else
-
-int64_t CycleClock::Now() {
- return std::chrono::duration_cast<std::chrono::nanoseconds>(
- std::chrono::steady_clock::now().time_since_epoch())
- .count();
-}
-
-double CycleClock::Frequency() {
- return 1e9;
-}
-
-#endif
-
-} // namespace base_internal
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency. Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+
+CycleClockSourceFunc LoadCycleClockSource() {
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback is
+ // invoked.
+ return cycle_clock_source.load(std::memory_order_acquire);
+}
+
+} // namespace
+
+int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+
+double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+ // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+ cycle_clock_source.store(source, std::memory_order_release);
+}
+
+#else
+
+int64_t CycleClock::Now() {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+}
+
+double CycleClock::Frequency() {
+ return 1e9;
+}
+
+#endif
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
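Usage note (not part of the patch): dividing a Now() delta by Frequency() yields seconds regardless of which kShift the build selected, since both values are scaled identically. A minimal sketch with a hypothetical helper:

#include <cstdint>

#include "absl/base/internal/cycleclock.h"

// Hypothetical helper: convert an elapsed-cycle delta to seconds.
// Frequency() reports Now() ticks per second, so the ratio is in seconds.
double ElapsedSeconds() {
  const int64_t start = absl::base_internal::CycleClock::Now();
  // ... code under measurement ...
  const int64_t end = absl::base_internal::CycleClock::Now();
  return static_cast<double>(end - start) /
         absl::base_internal::CycleClock::Frequency();
}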
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.h b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.h
index 20dfaca948..a18b584445 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/cycleclock.h
@@ -1,94 +1,94 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// -----------------------------------------------------------------------------
-// File: cycleclock.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines a `CycleClock`, which yields the value and frequency
-// of a cycle counter that increments at a rate that is approximately constant.
-//
-// NOTE:
-//
-// The cycle counter frequency is not necessarily related to the core clock
-// frequency and should not be treated as such. That is, `CycleClock` cycles are
-// not necessarily "CPU cycles" and code should not rely on that behavior, even
-// if experimentally observed.
-//
-// An arbitrary offset may have been added to the counter at power on.
-//
-// On some platforms, the rate and offset of the counter may differ
-// slightly when read from different CPUs of a multiprocessor. Usually,
-// we try to ensure that the operating system adjusts values periodically
-// so that values agree approximately. If you need stronger guarantees,
-// consider using alternate interfaces.
-//
-// The CPU is not required to maintain the ordering of a cycle counter read
-// with respect to surrounding instructions.
-
-#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
-#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
-
-#include <cstdint>
-
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// -----------------------------------------------------------------------------
-// CycleClock
-// -----------------------------------------------------------------------------
-class CycleClock {
- public:
- // CycleClock::Now()
- //
- // Returns the value of a cycle counter that counts at a rate that is
- // approximately constant.
- static int64_t Now();
-
- // CycleClock::Frequency()
- //
- // Returns the amount by which `CycleClock::Now()` increases per second. Note
- // that this value may not necessarily match the core CPU clock frequency.
- static double Frequency();
-
- private:
- CycleClock() = delete; // no instances
- CycleClock(const CycleClock&) = delete;
- CycleClock& operator=(const CycleClock&) = delete;
-};
-
-using CycleClockSourceFunc = int64_t (*)();
-
-class CycleClockSource {
- private:
- // CycleClockSource::Register()
- //
- // Register a function that provides an alternate source for the unscaled CPU
- // cycle count value. The source function must be async signal safe, must not
- // call CycleClock::Now(), and must have a frequency that matches that of the
- // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
- // the default source.
- static void Register(CycleClockSourceFunc source);
-};
-
-} // namespace base_internal
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+ // CycleClock::Now()
+ //
+ // Returns the value of a cycle counter that counts at a rate that is
+ // approximately constant.
+ static int64_t Now();
+
+ // CycleClock::Frequency()
+ //
+ // Returns the amount by which `CycleClock::Now()` increases per second. Note
+ // that this value may not necessarily match the core CPU clock frequency.
+ static double Frequency();
+
+ private:
+ CycleClock() = delete; // no instances
+ CycleClock(const CycleClock&) = delete;
+ CycleClock& operator=(const CycleClock&) = delete;
+};
+
+using CycleClockSourceFunc = int64_t (*)();
+
+class CycleClockSource {
+ private:
+ // CycleClockSource::Register()
+ //
+ // Register a function that provides an alternate source for the unscaled CPU
+ // cycle count value. The source function must be async signal safe, must not
+ // call CycleClock::Now(), and must have a frequency that matches that of the
+ // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+ // the default source.
+ static void Register(CycleClockSourceFunc source);
+};
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
index b6bcb218db..274054cd5a 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/direct_mmap.h
@@ -1,169 +1,169 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Functions for directly invoking mmap() via syscall, avoiding the case where
-// mmap() has been locally overridden.
-
-#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
-#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
-
-#include "absl/base/config.h"
-
-#if ABSL_HAVE_MMAP
-
-#include <sys/mman.h>
-
-#ifdef __linux__
-
-#include <sys/types.h>
-#ifdef __BIONIC__
-#include <sys/syscall.h>
-#else
-#include <syscall.h>
-#endif
-
-#include <linux/unistd.h>
-#include <unistd.h>
-#include <cerrno>
-#include <cstdarg>
-#include <cstdint>
-
-#ifdef __mips__
-// Include definitions of the ABI currently in use.
-#ifdef __BIONIC__
-// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
-// definitions we need.
-#include <asm/sgidefs.h>
-#else
-#include <sgidefs.h>
-#endif // __BIONIC__
-#endif // __mips__
-
-// SYS_mmap and SYS_munmap are not defined in Android.
-#ifdef __BIONIC__
-extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
-#if defined(__NR_mmap) && !defined(SYS_mmap)
-#define SYS_mmap __NR_mmap
-#endif
-#ifndef SYS_munmap
-#define SYS_munmap __NR_munmap
-#endif
-#endif // __BIONIC__
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif // __BIONIC__
+#endif // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif // __BIONIC__
+
#if defined(__NR_mmap2) && !defined(SYS_mmap2)
#define SYS_mmap2 __NR_mmap2
#endif
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// Platform specific logic extracted from
-// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
-inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
- off64_t offset) noexcept {
-#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+ off64_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
defined(__m68k__) || defined(__sh__) || \
(defined(__hppa__) && !defined(__LP64__)) || \
- (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
- (defined(__PPC__) && !defined(__PPC64__)) || \
+ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
+ (defined(__PPC__) && !defined(__PPC64__)) || \
(defined(__riscv) && __riscv_xlen == 32) || \
(defined(__s390__) && !defined(__s390x__)) || \
(defined(__sparc__) && !defined(__arch64__))
- // On these architectures, implement mmap with mmap2.
- static int pagesize = 0;
- if (pagesize == 0) {
-#if defined(__wasm__) || defined(__asmjs__)
- pagesize = getpagesize();
-#else
- pagesize = sysconf(_SC_PAGESIZE);
-#endif
- }
- if (offset < 0 || offset % pagesize != 0) {
- errno = EINVAL;
- return MAP_FAILED;
- }
-#ifdef __BIONIC__
- // SYS_mmap2 has problems on Android API level <= 16.
- // Workaround by invoking __mmap2() instead.
- return __mmap2(start, length, prot, flags, fd, offset / pagesize);
-#else
- return reinterpret_cast<void*>(
- syscall(SYS_mmap2, start, length, prot, flags, fd,
- static_cast<off_t>(offset / pagesize)));
-#endif
-#elif defined(__s390x__)
- // On s390x, mmap() arguments are passed in memory.
- unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
- static_cast<unsigned long>(length), // NOLINT
- static_cast<unsigned long>(prot), // NOLINT
- static_cast<unsigned long>(flags), // NOLINT
- static_cast<unsigned long>(fd), // NOLINT
- static_cast<unsigned long>(offset)}; // NOLINT
- return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
-#elif defined(__x86_64__)
-// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
-// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
-// sign extension. We can't cast pointers directly because those are
-// 32 bits, and gcc will dump ugly warnings about casting from a pointer
-// to an integer of a different size. We also need to make sure __off64_t
-// isn't truncated to 32-bits under x32.
-#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
- return reinterpret_cast<void*>(
- syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
- MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
- MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
-#undef MMAP_SYSCALL_ARG
-#else  // Remaining 64-bit architectures.
- static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
- return reinterpret_cast<void*>(
- syscall(SYS_mmap, start, length, prot, flags, fd, offset));
-#endif
-}
-
-inline int DirectMunmap(void* start, size_t length) {
- return static_cast<int>(syscall(SYS_munmap, start, length));
-}
-
-} // namespace base_internal
+ // On these architectures, implement mmap with mmap2.
+ static int pagesize = 0;
+ if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+ pagesize = getpagesize();
+#else
+ pagesize = sysconf(_SC_PAGESIZE);
+#endif
+ }
+ if (offset < 0 || offset % pagesize != 0) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+#ifdef __BIONIC__
+ // SYS_mmap2 has problems on Android API level <= 16.
+ // Workaround by invoking __mmap2() instead.
+ return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap2, start, length, prot, flags, fd,
+ static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+ // On s390x, mmap() arguments are passed in memory.
+ unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
+ static_cast<unsigned long>(length), // NOLINT
+ static_cast<unsigned long>(prot), // NOLINT
+ static_cast<unsigned long>(flags), // NOLINT
+ static_cast<unsigned long>(fd), // NOLINT
+ static_cast<unsigned long>(offset)}; // NOLINT
+ return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension. We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+ MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+ MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+ static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+ return reinterpret_cast<void*>(
+ syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+ return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#else // !__linux__
-
-// For non-linux platforms where we have mmap, just dispatch directly to the
-// actual mmap()/munmap() methods.
-
-namespace absl {
+} // namespace absl
+
+#else // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
- off_t offset) {
- return mmap(start, length, prot, flags, fd, offset);
-}
-
-inline int DirectMunmap(void* start, size_t length) {
- return munmap(start, length);
-}
-
-} // namespace base_internal
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+ return munmap(start, length);
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // __linux__
-
-#endif // ABSL_HAVE_MMAP
-
-#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+} // namespace absl
+
+#endif // __linux__
+
+#endif // ABSL_HAVE_MMAP
+
+#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
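Usage note (not part of the patch): DirectMmap/DirectMunmap exist for callers that must not re-enter a locally interposed mmap(), e.g. a low-level allocator. A minimal Linux-oriented sketch; AllocPage/FreePage are hypothetical names:

#include <cstddef>
#include <sys/mman.h>

#include "absl/base/internal/direct_mmap.h"

// Hypothetical page allocator built on the raw syscall wrappers above.
void* AllocPage(size_t page_size) {
  void* p = absl::base_internal::DirectMmap(
      nullptr, page_size, PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/-1, /*offset=*/0);
  return p == MAP_FAILED ? nullptr : p;
}

int FreePage(void* p, size_t page_size) {
  return absl::base_internal::DirectMunmap(p, page_size);
}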
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
index c0441df3d3..dad0e9aeb0 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/endian.h
@@ -1,179 +1,179 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
-#define ABSL_BASE_INTERNAL_ENDIAN_H_
-
-// The following guarantees declaration of the byte swap functions
-#ifdef _MSC_VER
-#include <stdlib.h> // NOLINT(build/include)
-#elif defined(__FreeBSD__)
-#include <sys/endian.h>
-#elif defined(__GLIBC__)
-#include <byteswap.h> // IWYU pragma: export
-#endif
-
-#include <cstdint>
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h> // NOLINT(build/include)
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h> // IWYU pragma: export
+#endif
+
+#include <cstdint>
#include "absl/base/casts.h"
-#include "absl/base/config.h"
-#include "absl/base/internal/unaligned_access.h"
-#include "absl/base/port.h"
-
-namespace absl {
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-// Use compiler byte-swapping intrinsics if they are available. 32-bit
-// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
-// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
-// For simplicity, we enable them all only for GCC 4.8.0 or later.
-#if defined(__clang__) || \
- (defined(__GNUC__) && \
- ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
-inline uint64_t gbswap_64(uint64_t host_int) {
- return __builtin_bswap64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return __builtin_bswap32(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return __builtin_bswap16(host_int);
-}
-
-#elif defined(_MSC_VER)
-inline uint64_t gbswap_64(uint64_t host_int) {
- return _byteswap_uint64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return _byteswap_ulong(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return _byteswap_ushort(host_int);
-}
-
-#else
-inline uint64_t gbswap_64(uint64_t host_int) {
-#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
- // Adapted from /usr/include/byteswap.h. Not available on Mac.
- if (__builtin_constant_p(host_int)) {
- return __bswap_constant_64(host_int);
- } else {
- uint64_t result;
- __asm__("bswap %0" : "=r"(result) : "0"(host_int));
- return result;
- }
-#elif defined(__GLIBC__)
- return bswap_64(host_int);
-#else
- return (((host_int & uint64_t{0xFF}) << 56) |
- ((host_int & uint64_t{0xFF00}) << 40) |
- ((host_int & uint64_t{0xFF0000}) << 24) |
- ((host_int & uint64_t{0xFF000000}) << 8) |
- ((host_int & uint64_t{0xFF00000000}) >> 8) |
- ((host_int & uint64_t{0xFF0000000000}) >> 24) |
- ((host_int & uint64_t{0xFF000000000000}) >> 40) |
- ((host_int & uint64_t{0xFF00000000000000}) >> 56));
-#endif // bswap_64
-}
-
-inline uint32_t gbswap_32(uint32_t host_int) {
-#if defined(__GLIBC__)
- return bswap_32(host_int);
-#else
- return (((host_int & uint32_t{0xFF}) << 24) |
- ((host_int & uint32_t{0xFF00}) << 8) |
- ((host_int & uint32_t{0xFF0000}) >> 8) |
- ((host_int & uint32_t{0xFF000000}) >> 24));
-#endif
-}
-
-inline uint16_t gbswap_16(uint16_t host_int) {
-#if defined(__GLIBC__)
- return bswap_16(host_int);
-#else
- return (((host_int & uint16_t{0xFF}) << 8) |
- ((host_int & uint16_t{0xFF00}) >> 8));
-#endif
-}
-
-#endif // intrinsics available
-
-#ifdef ABSL_IS_LITTLE_ENDIAN
-
-// Definitions for ntohl etc. that don't require us to include
-// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
-// than just #defining them because in debug mode, gcc doesn't
-// correctly handle the (rather involved) definitions of bswap_32.
-// gcc guarantees that inline functions are as fast as macros, so
-// this isn't a performance hit.
-inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
-inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
-inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
-
-#elif defined ABSL_IS_BIG_ENDIAN
-
-// These definitions are simpler on big-endian machines
-// These are functions instead of macros to avoid self-assignment warnings
- // on calls such as "i = ghtonl(i);". This also provides type checking.
-inline uint16_t ghtons(uint16_t x) { return x; }
-inline uint32_t ghtonl(uint32_t x) { return x; }
-inline uint64_t ghtonll(uint64_t x) { return x; }
-
-#else
-#error \
- "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
- "ABSL_IS_LITTLE_ENDIAN must be defined"
-#endif // byte order
-
-inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
-inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
-inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
-
-// Utilities to convert numbers between the current host's native byte
-// order and little-endian byte order
-//
-// Load/Store methods are alignment safe
-namespace little_endian {
-// Conversion functions.
-#ifdef ABSL_IS_LITTLE_ENDIAN
-
-inline uint16_t FromHost16(uint16_t x) { return x; }
-inline uint16_t ToHost16(uint16_t x) { return x; }
-
-inline uint32_t FromHost32(uint32_t x) { return x; }
-inline uint32_t ToHost32(uint32_t x) { return x; }
-
-inline uint64_t FromHost64(uint64_t x) { return x; }
-inline uint64_t ToHost64(uint64_t x) { return x; }
-
-inline constexpr bool IsLittleEndian() { return true; }
-
-#elif defined ABSL_IS_BIG_ENDIAN
-
-inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
-inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
-
-inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
-inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
-
-inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
-inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
-
-inline constexpr bool IsLittleEndian() { return false; }
-
-#endif /* ENDIAN */
-
+
+// Use compiler byte-swapping intrinsics if they are available. 32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+ return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+ return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+ return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+ return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+ return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+ return _byteswap_ushort(host_int);
+}
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+ // Adapted from /usr/include/byteswap.h. Not available on Mac.
+ if (__builtin_constant_p(host_int)) {
+ return __bswap_constant_64(host_int);
+ } else {
+ uint64_t result;
+ __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+ return result;
+ }
+#elif defined(__GLIBC__)
+ return bswap_64(host_int);
+#else
+ return (((host_int & uint64_t{0xFF}) << 56) |
+ ((host_int & uint64_t{0xFF00}) << 40) |
+ ((host_int & uint64_t{0xFF0000}) << 24) |
+ ((host_int & uint64_t{0xFF000000}) << 8) |
+ ((host_int & uint64_t{0xFF00000000}) >> 8) |
+ ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+ ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+ ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+ return bswap_32(host_int);
+#else
+ return (((host_int & uint32_t{0xFF}) << 24) |
+ ((host_int & uint32_t{0xFF00}) << 8) |
+ ((host_int & uint32_t{0xFF0000}) >> 8) |
+ ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+ return bswap_16(host_int);
+#else
+ return (((host_int & uint16_t{0xFF}) << 8) |
+ ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#endif // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+ // on calls such as "i = ghtonl(i);". This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+ "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+ "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
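+
+// Illustrative sketch (not part of the header): on a little-endian host both
+// ghtonl and gntohl byte-swap, and on a big-endian host both are the
+// identity, so the round trip always recovers the host value:
+//
+//   uint32_t host_value = 0x0A0B0C0D;
+//   uint32_t net_value = ghtonl(host_value);  // big-endian (network) order
+//   assert(gntohl(net_value) == host_value);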
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
@@ -204,66 +204,66 @@ inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
-// Functions to do unaligned loads and stores in little-endian order.
-inline uint16_t Load16(const void *p) {
- return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
-}
-
-inline void Store16(void *p, uint16_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
-}
-
-inline uint32_t Load32(const void *p) {
- return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
-}
-
-inline void Store32(void *p, uint32_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
-}
-
-inline uint64_t Load64(const void *p) {
- return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
-}
-
-inline void Store64(void *p, uint64_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
-}
-
-} // namespace little_endian
-
-// Utilities to convert numbers between the current host's native byte
-// order and big-endian byte order (same as network byte order)
-//
-// Load/Store methods are alignment safe
-namespace big_endian {
-#ifdef ABSL_IS_LITTLE_ENDIAN
-
-inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
-inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
-
-inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
-inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
-
-inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
-inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
-
-inline constexpr bool IsLittleEndian() { return true; }
-
-#elif defined ABSL_IS_BIG_ENDIAN
-
-inline uint16_t FromHost16(uint16_t x) { return x; }
-inline uint16_t ToHost16(uint16_t x) { return x; }
-
-inline uint32_t FromHost32(uint32_t x) { return x; }
-inline uint32_t ToHost32(uint32_t x) { return x; }
-
-inline uint64_t FromHost64(uint64_t x) { return x; }
-inline uint64_t ToHost64(uint64_t x) { return x; }
-
-inline constexpr bool IsLittleEndian() { return false; }
-
-#endif /* ENDIAN */
-
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+} // namespace little_endian
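+
+// Illustrative sketch (not part of the header): Store32/Load32 serialize a
+// value into a possibly-unaligned byte buffer in little-endian order:
+//
+//   char buf[sizeof(uint32_t)];
+//   little_endian::Store32(buf, 0x11223344);  // buf = {0x44, 0x33, 0x22, 0x11}
+//   assert(little_endian::Load32(buf) == 0x11223344);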
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
@@ -294,34 +294,34 @@ inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
-// Functions to do unaligned loads and stores in big-endian order.
-inline uint16_t Load16(const void *p) {
- return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
-}
-
-inline void Store16(void *p, uint16_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
-}
-
-inline uint32_t Load32(const void *p) {
- return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
-}
-
-inline void Store32(void *p, uint32_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
-}
-
-inline uint64_t Load64(const void *p) {
- return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
-}
-
-inline void Store64(void *p, uint64_t v) {
- ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
-}
-
-} // namespace big_endian
-
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+} // namespace big_endian
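+
+// Illustrative sketch (not part of the header): since big-endian order is
+// network byte order, these helpers suit wire formats such as a 16-bit
+// length prefix:
+//
+//   uint16_t payload_size = 512;
+//   char frame[sizeof(uint16_t)];
+//   big_endian::Store16(frame, payload_size);  // frame = {0x02, 0x00}
+//   assert(big_endian::Load16(frame) == payload_size);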
+
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/exception_safety_testing.h b/contrib/restricted/abseil-cpp/absl/base/internal/exception_safety_testing.h
index f6855c1c3a..77a5aec642 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/exception_safety_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/exception_safety_testing.h
@@ -1,541 +1,541 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Utilities for testing exception-safety
-
-#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
-#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
-
-#include "absl/base/config.h"
-
-#ifdef ABSL_HAVE_EXCEPTIONS
-
-#include <cstddef>
-#include <cstdint>
-#include <functional>
-#include <initializer_list>
-#include <iosfwd>
-#include <string>
-#include <tuple>
-#include <unordered_map>
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/pretty_function.h"
-#include "absl/memory/memory.h"
-#include "absl/meta/type_traits.h"
-#include "absl/strings/string_view.h"
-#include "absl/strings/substitute.h"
-#include "absl/utility/utility.h"
-
-namespace testing {
-
-enum class TypeSpec;
-enum class AllocSpec;
-
-constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
- using T = absl::underlying_type_t<TypeSpec>;
- return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
-}
-
-constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
- using T = absl::underlying_type_t<TypeSpec>;
- return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
-}
-
-constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
- using T = absl::underlying_type_t<AllocSpec>;
- return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
-}
-
-constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
- using T = absl::underlying_type_t<AllocSpec>;
- return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
-}
-
-namespace exceptions_internal {
-
-std::string GetSpecString(TypeSpec);
-std::string GetSpecString(AllocSpec);
-
-struct NoThrowTag {};
-struct StrongGuaranteeTagType {};
-
-// A simple exception class. We throw this so that test code can catch
-// exceptions specifically thrown by ThrowingValue.
-class TestException {
- public:
- explicit TestException(absl::string_view msg) : msg_(msg) {}
- virtual ~TestException() {}
- virtual const char* what() const noexcept { return msg_.c_str(); }
-
- private:
- std::string msg_;
-};
-
-// TestBadAllocException exists because allocation functions must throw an
-// exception which can be caught by a handler of std::bad_alloc. We use a child
-// class of std::bad_alloc so we can customise the error message, and also
-// derive from TestException so we don't accidentally end up catching an actual
-// bad_alloc exception in TestExceptionSafety.
-class TestBadAllocException : public std::bad_alloc, public TestException {
- public:
- explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
- using TestException::what;
-};
-
-extern int countdown;
-
-// Allows the countdown variable to be set manually (defaulting to the initial
-// value of 0)
-inline void SetCountdown(int i = 0) { countdown = i; }
-// Sets the countdown to the terminal value -1
-inline void UnsetCountdown() { SetCountdown(-1); }
-
-void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
-
-testing::AssertionResult FailureMessage(const TestException& e,
- int countdown) noexcept;
-
-struct TrackedAddress {
- bool is_alive;
- std::string description;
-};
-
-// Inspects the constructions and destructions of anything inheriting from
-// TrackedObject. This allows us to safely "leak" TrackedObjects, as
-// ConstructorTracker will destroy everything left over in its destructor.
-class ConstructorTracker {
- public:
- explicit ConstructorTracker(int count) : countdown_(count) {
- assert(current_tracker_instance_ == nullptr);
- current_tracker_instance_ = this;
- }
-
- ~ConstructorTracker() {
- assert(current_tracker_instance_ == this);
- current_tracker_instance_ = nullptr;
-
- for (auto& it : address_map_) {
- void* address = it.first;
- TrackedAddress& tracked_address = it.second;
- if (tracked_address.is_alive) {
- ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
- countdown_, "Object was not destroyed.");
- }
- }
- }
-
- static void ObjectConstructed(void* address, std::string description) {
- if (!CurrentlyTracking()) return;
-
- TrackedAddress& tracked_address =
- current_tracker_instance_->address_map_[address];
- if (tracked_address.is_alive) {
- ADD_FAILURE() << ErrorMessage(
- address, tracked_address.description,
- current_tracker_instance_->countdown_,
- "Object was re-constructed. Current object was constructed by " +
- description);
- }
- tracked_address = {true, std::move(description)};
- }
-
- static void ObjectDestructed(void* address) {
- if (!CurrentlyTracking()) return;
-
- auto it = current_tracker_instance_->address_map_.find(address);
- // Not tracked. Ignore.
- if (it == current_tracker_instance_->address_map_.end()) return;
-
- TrackedAddress& tracked_address = it->second;
- if (!tracked_address.is_alive) {
- ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
- current_tracker_instance_->countdown_,
- "Object was re-destroyed.");
- }
- tracked_address.is_alive = false;
- }
-
- private:
- static bool CurrentlyTracking() {
- return current_tracker_instance_ != nullptr;
- }
-
- static std::string ErrorMessage(void* address,
- const std::string& address_description,
- int countdown,
- const std::string& error_description) {
- return absl::Substitute(
-        "With countdown at $0:\n"
- " $1\n"
- " Object originally constructed by $2\n"
- " Object address: $3\n",
- countdown, error_description, address_description, address);
- }
-
- std::unordered_map<void*, TrackedAddress> address_map_;
- int countdown_;
-
- static ConstructorTracker* current_tracker_instance_;
-};
-
-class TrackedObject {
- public:
- TrackedObject(const TrackedObject&) = delete;
- TrackedObject(TrackedObject&&) = delete;
-
- protected:
- explicit TrackedObject(std::string description) {
- ConstructorTracker::ObjectConstructed(this, std::move(description));
- }
-
- ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
-};
-} // namespace exceptions_internal
-
-extern exceptions_internal::NoThrowTag nothrow_ctor;
-
-extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
-
-// A test class which is convertible to bool. The conversion can be
-// instrumented to throw at a controlled time.
-class ThrowingBool {
- public:
- ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
- operator bool() const { // NOLINT
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return b_;
- }
-
- private:
- bool b_;
-};
-
-/*
- * Configuration enum for the ThrowingValue type that defines behavior for the
- * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
- * constructor from throwing.
- *
- * kEverythingThrows: Every operation can throw an exception
- * kNoThrowCopy: Copy construction and copy assignment will not throw
- * kNoThrowMove: Move construction and move assignment will not throw
- * kNoThrowNew: Overloaded operators new and new[] will not throw
- */
-enum class TypeSpec {
- kEverythingThrows = 0,
- kNoThrowCopy = 1,
- kNoThrowMove = 1 << 1,
- kNoThrowNew = 1 << 2,
-};
-
-/*
- * A testing class instrumented to throw an exception at a controlled time.
- *
- * ThrowingValue implements a slightly relaxed version of the Regular concept --
- * that is, it's a value type with the expected semantics. It also implements
- * arithmetic operations. It doesn't implement member and pointer operators
- * like operator-> or operator[].
- *
- * ThrowingValue can be instrumented to have certain operations be noexcept by
- * using compile-time bitfield template arguments. That is, to make a
- * ThrowingValue that has noexcept move construction/assignment and noexcept
- * copy construction/assignment, use the following:
- * ThrowingValue<testing::TypeSpec::kNoThrowMove |
- *               testing::TypeSpec::kNoThrowCopy> my_thrwr{val};
- */
-template <TypeSpec Spec = TypeSpec::kEverythingThrows>
-class ThrowingValue : private exceptions_internal::TrackedObject {
- static constexpr bool IsSpecified(TypeSpec spec) {
- return static_cast<bool>(Spec & spec);
- }
-
- static constexpr int kDefaultValue = 0;
- static constexpr int kBadValue = 938550620;
-
- public:
- ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ = kDefaultValue;
- }
-
- ThrowingValue(const ThrowingValue& other) noexcept(
- IsSpecified(TypeSpec::kNoThrowCopy))
- : TrackedObject(GetInstanceString(other.dummy_)) {
- if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- }
- dummy_ = other.dummy_;
- }
-
- ThrowingValue(ThrowingValue&& other) noexcept(
- IsSpecified(TypeSpec::kNoThrowMove))
- : TrackedObject(GetInstanceString(other.dummy_)) {
- if (!IsSpecified(TypeSpec::kNoThrowMove)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- }
- dummy_ = other.dummy_;
- }
-
- explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ = i;
- }
-
- ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
- : TrackedObject(GetInstanceString(i)), dummy_(i) {}
-
- // absl expects nothrow destructors
- ~ThrowingValue() noexcept = default;
-
- ThrowingValue& operator=(const ThrowingValue& other) noexcept(
- IsSpecified(TypeSpec::kNoThrowCopy)) {
- dummy_ = kBadValue;
- if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- }
- dummy_ = other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator=(ThrowingValue&& other) noexcept(
- IsSpecified(TypeSpec::kNoThrowMove)) {
- dummy_ = kBadValue;
- if (!IsSpecified(TypeSpec::kNoThrowMove)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- }
- dummy_ = other.dummy_;
- return *this;
- }
-
- // Arithmetic Operators
- ThrowingValue operator+(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator+() const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator-(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator-() const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(-dummy_, nothrow_ctor);
- }
-
- ThrowingValue& operator++() {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- ++dummy_;
- return *this;
- }
-
- ThrowingValue operator++(int) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- auto out = ThrowingValue(dummy_, nothrow_ctor);
- ++dummy_;
- return out;
- }
-
- ThrowingValue& operator--() {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- --dummy_;
- return *this;
- }
-
- ThrowingValue operator--(int) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- auto out = ThrowingValue(dummy_, nothrow_ctor);
- --dummy_;
- return out;
- }
-
- ThrowingValue operator*(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator/(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator%(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator<<(int shift) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ << shift, nothrow_ctor);
- }
-
- ThrowingValue operator>>(int shift) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ >> shift, nothrow_ctor);
- }
-
- // Comparison Operators
- // NOTE: We use `ThrowingBool` instead of `bool` because most STL
-  // types/containers require T to be convertible to bool.
- friend ThrowingBool operator==(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ == b.dummy_;
- }
- friend ThrowingBool operator!=(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ != b.dummy_;
- }
- friend ThrowingBool operator<(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ < b.dummy_;
- }
- friend ThrowingBool operator<=(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ <= b.dummy_;
- }
- friend ThrowingBool operator>(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ > b.dummy_;
- }
- friend ThrowingBool operator>=(const ThrowingValue& a,
- const ThrowingValue& b) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return a.dummy_ >= b.dummy_;
- }
-
- // Logical Operators
- ThrowingBool operator!() const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return !dummy_;
- }
-
- ThrowingBool operator&&(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return dummy_ && other.dummy_;
- }
-
- ThrowingBool operator||(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return dummy_ || other.dummy_;
- }
-
- // Bitwise Logical Operators
- ThrowingValue operator~() const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(~dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator&(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator|(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
- }
-
- ThrowingValue operator^(const ThrowingValue& other) const {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
- }
-
- // Compound Assignment operators
- ThrowingValue& operator+=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ += other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator-=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ -= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator*=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ *= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator/=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ /= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator%=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ %= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator&=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ &= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator|=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ |= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator^=(const ThrowingValue& other) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ ^= other.dummy_;
- return *this;
- }
-
- ThrowingValue& operator<<=(int shift) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ <<= shift;
- return *this;
- }
-
- ThrowingValue& operator>>=(int shift) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ >>= shift;
- return *this;
- }
-
- // Pointer operators
- void operator&() const = delete; // NOLINT(runtime/operator)
-
- // Stream operators
- friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return os << GetInstanceString(tv.dummy_);
- }
-
- friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- return is;
- }
-
- // Memory management operators
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/utility/utility.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+ using T = absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+ using T = absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+ using T = absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+ using T = absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+std::string GetSpecString(TypeSpec);
+std::string GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class. We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+ explicit TestException(absl::string_view msg) : msg_(msg) {}
+ virtual ~TestException() {}
+ virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+ std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc. We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+ explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+ using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
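+
+// Illustrative sketch (not part of the header): tests typically bracket the
+// code under test with these helpers; with the countdown at k, the (k+1)-th
+// potentially-throwing operation throws a TestException, so SetCountdown(0)
+// makes the very next one throw:
+//
+//   exceptions_internal::SetCountdown(0);
+//   // ...run the code under test; its first throwing point will throw...
+//   exceptions_internal::UnsetCountdown();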
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+ int countdown) noexcept;
+
+struct TrackedAddress {
+ bool is_alive;
+ std::string description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will destroy everything left over in its destructor.
+class ConstructorTracker {
+ public:
+ explicit ConstructorTracker(int count) : countdown_(count) {
+ assert(current_tracker_instance_ == nullptr);
+ current_tracker_instance_ = this;
+ }
+
+ ~ConstructorTracker() {
+ assert(current_tracker_instance_ == this);
+ current_tracker_instance_ = nullptr;
+
+ for (auto& it : address_map_) {
+ void* address = it.first;
+ TrackedAddress& tracked_address = it.second;
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ countdown_, "Object was not destroyed.");
+ }
+ }
+ }
+
+ static void ObjectConstructed(void* address, std::string description) {
+ if (!CurrentlyTracking()) return;
+
+ TrackedAddress& tracked_address =
+ current_tracker_instance_->address_map_[address];
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(
+ address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-constructed. Current object was constructed by " +
+ description);
+ }
+ tracked_address = {true, std::move(description)};
+ }
+
+ static void ObjectDestructed(void* address) {
+ if (!CurrentlyTracking()) return;
+
+ auto it = current_tracker_instance_->address_map_.find(address);
+ // Not tracked. Ignore.
+ if (it == current_tracker_instance_->address_map_.end()) return;
+
+ TrackedAddress& tracked_address = it->second;
+ if (!tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-destroyed.");
+ }
+ tracked_address.is_alive = false;
+ }
+
+ private:
+ static bool CurrentlyTracking() {
+ return current_tracker_instance_ != nullptr;
+ }
+
+ static std::string ErrorMessage(void* address,
+ const std::string& address_description,
+ int countdown,
+ const std::string& error_description) {
+ return absl::Substitute(
+        "With countdown at $0:\n"
+ " $1\n"
+ " Object originally constructed by $2\n"
+ " Object address: $3\n",
+ countdown, error_description, address_description, address);
+ }
+
+ std::unordered_map<void*, TrackedAddress> address_map_;
+ int countdown_;
+
+ static ConstructorTracker* current_tracker_instance_;
+};
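+
+// Illustrative sketch (not part of the header): scoping a ConstructorTracker
+// around test code makes it report, at end of scope, any tracked object that
+// was constructed but never destroyed:
+//
+//   {
+//     ConstructorTracker tracker(/*count=*/0);
+//     // ...construct and destroy TrackedObject-derived test objects...
+//   }  // tracker's destructor flags objects that are still alive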
+
+class TrackedObject {
+ public:
+ TrackedObject(const TrackedObject&) = delete;
+ TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+ explicit TrackedObject(std::string description) {
+ ConstructorTracker::ObjectConstructed(this, std::move(description));
+ }
+
+ ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+} // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool. The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+ ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
+ operator bool() const { // NOLINT
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return b_;
+ }
+
+ private:
+ bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+ kEverythingThrows = 0,
+ kNoThrowCopy = 1,
+ kNoThrowMove = 1 << 1,
+ kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is, it's a value type with the expected semantics. It also implements
+ * arithmetic operations. It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments. That is, to make a
+ * ThrowingValue that has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ * ThrowingValue<testing::TypeSpec::kNoThrowMove |
+ *               testing::TypeSpec::kNoThrowCopy> my_thrwr{val};
+ */
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(TypeSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+ static constexpr int kDefaultValue = 0;
+ static constexpr int kBadValue = 938550620;
+
+ public:
+ ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = kDefaultValue;
+ }
+
+ ThrowingValue(const ThrowingValue& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ ThrowingValue(ThrowingValue&& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = i;
+ }
+
+ ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+ : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+ // absl expects nothrow destructors
+ ~ThrowingValue() noexcept = default;
+
+ ThrowingValue& operator=(const ThrowingValue& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ // Arithmetic Operators
+ ThrowingValue operator+(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator+() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(-dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue& operator++() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ ++dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator++(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ ++dummy_;
+ return out;
+ }
+
+ ThrowingValue& operator--() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ --dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator--(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ --dummy_;
+ return out;
+ }
+
+ ThrowingValue operator*(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator/(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator%(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator<<(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ << shift, nothrow_ctor);
+ }
+
+ ThrowingValue operator>>(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+ }
+
+ // Comparison Operators
+ // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+ friend ThrowingBool operator==(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ == b.dummy_;
+ }
+ friend ThrowingBool operator!=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ != b.dummy_;
+ }
+ friend ThrowingBool operator<(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ < b.dummy_;
+ }
+ friend ThrowingBool operator<=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ <= b.dummy_;
+ }
+ friend ThrowingBool operator>(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ > b.dummy_;
+ }
+ friend ThrowingBool operator>=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ >= b.dummy_;
+ }
+
+ // Logical Operators
+ ThrowingBool operator!() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return !dummy_;
+ }
+
+ ThrowingBool operator&&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ && other.dummy_;
+ }
+
+ ThrowingBool operator||(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ || other.dummy_;
+ }
+
+ // Bitwise Logical Operators
+ ThrowingValue operator~() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(~dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator|(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator^(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
+ }
+
+ // Compound Assignment operators
+ ThrowingValue& operator+=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ += other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator-=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ -= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator*=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ *= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator/=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ /= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator%=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ %= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator&=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ &= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator|=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ |= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator^=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ ^= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator<<=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ <<= shift;
+ return *this;
+ }
+
+ ThrowingValue& operator>>=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ >>= shift;
+ return *this;
+ }
+
+ // Pointer operators
+ void operator&() const = delete; // NOLINT(runtime/operator)
+
+ // Stream operators
+ friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return os << GetInstanceString(tv.dummy_);
+ }
+
+ friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return is;
+ }
+
+ // Memory management operators
static void* operator new(size_t s) noexcept(
IsSpecified(TypeSpec::kNoThrowNew)) {
if (!IsSpecified(TypeSpec::kNoThrowNew)) {
@@ -552,558 +552,558 @@ class ThrowingValue : private exceptions_internal::TrackedObject {
return ::operator new[](s);
}
- template <typename... Args>
- static void* operator new(size_t s, Args&&... args) noexcept(
- IsSpecified(TypeSpec::kNoThrowNew)) {
- if (!IsSpecified(TypeSpec::kNoThrowNew)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
- }
- return ::operator new(s, std::forward<Args>(args)...);
- }
-
- template <typename... Args>
- static void* operator new[](size_t s, Args&&... args) noexcept(
- IsSpecified(TypeSpec::kNoThrowNew)) {
- if (!IsSpecified(TypeSpec::kNoThrowNew)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
- }
- return ::operator new[](s, std::forward<Args>(args)...);
- }
-
- // Abseil doesn't support throwing overloaded operator delete. These are
- // provided so a throwing operator-new can clean up after itself.
- void operator delete(void* p) noexcept { ::operator delete(p); }
-
- template <typename... Args>
- void operator delete(void* p, Args&&... args) noexcept {
- ::operator delete(p, std::forward<Args>(args)...);
- }
-
- void operator delete[](void* p) noexcept { return ::operator delete[](p); }
-
- template <typename... Args>
- void operator delete[](void* p, Args&&... args) noexcept {
- return ::operator delete[](p, std::forward<Args>(args)...);
- }
-
- // Non-standard access to the actual contained value. No need for this to
- // throw.
- int& Get() noexcept { return dummy_; }
- const int& Get() const noexcept { return dummy_; }
-
- private:
- static std::string GetInstanceString(int dummy) {
- return absl::StrCat("ThrowingValue<",
- exceptions_internal::GetSpecString(Spec), ">(", dummy,
- ")");
- }
-
- int dummy_;
-};
-// While not having to do with exceptions, explicitly delete comma operator, to
-// make sure we don't use it on user-supplied types.
-template <TypeSpec Spec, typename T>
-void operator,(const ThrowingValue<Spec>&, T&&) = delete;
-template <TypeSpec Spec, typename T>
-void operator,(T&&, const ThrowingValue<Spec>&) = delete;
-
-/*
- * Configuration enum for the ThrowingAllocator type that defines behavior for
- * the lifetime of the instance.
- *
- * kEverythingThrows: Calls to the member functions may throw
- * kNoThrowAllocate: Calls to the member functions will not throw
- */
-enum class AllocSpec {
- kEverythingThrows = 0,
- kNoThrowAllocate = 1,
-};
-
-/*
- * An allocator type which is instrumented to throw at a controlled time, or not
- * to throw, using AllocSpec. The supported settings are the default of every
- * function which is allowed to throw in a conforming allocator possibly
- * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS
- * configuration macro.
- */
-template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
-class ThrowingAllocator : private exceptions_internal::TrackedObject {
- static constexpr bool IsSpecified(AllocSpec spec) {
- return static_cast<bool>(Spec & spec);
- }
-
- public:
- using pointer = T*;
- using const_pointer = const T*;
- using reference = T&;
- using const_reference = const T&;
- using void_pointer = void*;
- using const_void_pointer = const void*;
- using value_type = T;
- using size_type = size_t;
- using difference_type = ptrdiff_t;
-
- using is_nothrow =
- std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
- using propagate_on_container_copy_assignment = std::true_type;
- using propagate_on_container_move_assignment = std::true_type;
- using propagate_on_container_swap = std::true_type;
- using is_always_equal = std::false_type;
-
- ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
- exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
- dummy_ = std::make_shared<const int>(next_id_++);
- }
-
- template <typename U>
- ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept // NOLINT
- : TrackedObject(GetInstanceString(*other.State())),
- dummy_(other.State()) {}
-
- // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
- // allocator shall not exit via an exception, thus they are marked noexcept.
- ThrowingAllocator(const ThrowingAllocator& other) noexcept
- : TrackedObject(GetInstanceString(*other.State())),
- dummy_(other.State()) {}
-
- template <typename U>
- ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept // NOLINT
- : TrackedObject(GetInstanceString(*other.State())),
- dummy_(std::move(other.State())) {}
-
- ThrowingAllocator(ThrowingAllocator&& other) noexcept
- : TrackedObject(GetInstanceString(*other.State())),
- dummy_(std::move(other.State())) {}
-
- ~ThrowingAllocator() noexcept = default;
-
- ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
- dummy_ = other.State();
- return *this;
- }
-
- template <typename U>
- ThrowingAllocator& operator=(
- const ThrowingAllocator<U, Spec>& other) noexcept {
- dummy_ = other.State();
- return *this;
- }
-
- template <typename U>
- ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept {
- dummy_ = std::move(other.State());
- return *this;
- }
-
- template <typename U>
- struct rebind {
- using other = ThrowingAllocator<U, Spec>;
- };
-
- pointer allocate(size_type n) noexcept(
- IsSpecified(AllocSpec::kNoThrowAllocate)) {
- ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
- return static_cast<pointer>(::operator new(n * sizeof(T)));
- }
-
- pointer allocate(size_type n, const_void_pointer) noexcept(
- IsSpecified(AllocSpec::kNoThrowAllocate)) {
- return allocate(n);
- }
-
- void deallocate(pointer ptr, size_type) noexcept {
- ReadState();
- ::operator delete(static_cast<void*>(ptr));
- }
-
- template <typename U, typename... Args>
- void construct(U* ptr, Args&&... args) noexcept(
- IsSpecified(AllocSpec::kNoThrowAllocate)) {
- ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
- ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
- }
-
- template <typename U>
- void destroy(U* p) noexcept {
- ReadState();
- p->~U();
- }
-
- size_type max_size() const noexcept {
- return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
- }
-
- ThrowingAllocator select_on_container_copy_construction() noexcept(
- IsSpecified(AllocSpec::kNoThrowAllocate)) {
- ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ template <typename... Args>
+ static void* operator new(size_t s, Args&&... args) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new(s, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ static void* operator new[](size_t s, Args&&... args) noexcept(
+ IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new[](s, std::forward<Args>(args)...);
+ }
+
+ // Abseil doesn't support throwing overloaded operator delete. These are
+ // provided so a throwing operator-new can clean up after itself.
+ void operator delete(void* p) noexcept { ::operator delete(p); }
+
+ template <typename... Args>
+ void operator delete(void* p, Args&&... args) noexcept {
+ ::operator delete(p, std::forward<Args>(args)...);
+ }
+
+ void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+ template <typename... Args>
+ void operator delete[](void* p, Args&&... args) noexcept {
+ return ::operator delete[](p, std::forward<Args>(args)...);
+ }
+
+ // Non-standard access to the actual contained value. No need for this to
+ // throw.
+ int& Get() noexcept { return dummy_; }
+ const int& Get() const noexcept { return dummy_; }
+
+ private:
+ static std::string GetInstanceString(int dummy) {
+ return absl::StrCat("ThrowingValue<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ int dummy_;
+};
+// While not having to do with exceptions, explicitly delete comma operator, to
+// make sure we don't use it on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec>&, T&&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T&&, const ThrowingValue<Spec>&) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+ kEverythingThrows = 0,
+ kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. The supported settings are the default of every
+ * function which is allowed to throw in a conforming allocator possibly
+ * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS
+ * configuration macro.
+ */
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(AllocSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+ public:
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using void_pointer = void*;
+ using const_void_pointer = const void*;
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ using is_nothrow =
+ std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+ using is_always_equal = std::false_type;
+
+ ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = std::make_shared<const int>(next_id_++);
+ }
+
+ template <typename U>
+ ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+ // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
+ // allocator shall not exit via an exception, thus they are marked noexcept.
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+ template <typename U>
+ ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ThrowingAllocator(ThrowingAllocator&& other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ~ThrowingAllocator() noexcept = default;
+
+ ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator& operator=(
+ const ThrowingAllocator<U, Spec>& other) noexcept {
+ dummy_ = other.State();
return *this;
- }
-
- template <typename U>
- bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept {
- return dummy_ == other.dummy_;
- }
-
- template <typename U>
- bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept {
- return dummy_ != other.dummy_;
- }
-
- template <typename, AllocSpec>
- friend class ThrowingAllocator;
-
- private:
- static std::string GetInstanceString(int dummy) {
- return absl::StrCat("ThrowingAllocator<",
- exceptions_internal::GetSpecString(Spec), ">(", dummy,
- ")");
- }
-
- const std::shared_ptr<const int>& State() const { return dummy_; }
- std::shared_ptr<const int>& State() { return dummy_; }
-
- void ReadState() {
- // we know that this will never be true, but the compiler doesn't, so this
- // should safely force a read of the value.
- if (*dummy_ < 0) std::abort();
- }
-
- void ReadStateAndMaybeThrow(absl::string_view msg) const {
- if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
- exceptions_internal::MaybeThrow(
- absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
- }
- }
-
- static int next_id_;
- std::shared_ptr<const int> dummy_;
-};
-
-template <typename T, AllocSpec Spec>
-int ThrowingAllocator<T, Spec>::next_id_ = 0;
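-
-// Illustrative sketch (not part of the header): ThrowingAllocator is
-// typically plugged into a container under test, e.g.
-//
-//   std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
-//   v.push_back(ThrowingValue<>(1, nothrow_ctor));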
-
-// Tests for resource leaks by using the countdown method to construct a T
-// from args repeatedly until construction succeeds; each throwing attempt is
-// checked for leaked objects along the way.
-template <typename T, typename... Args>
-void TestThrowingCtor(Args&&... args) {
- struct Cleanup {
- ~Cleanup() { exceptions_internal::UnsetCountdown(); }
- } c;
- for (int count = 0;; ++count) {
- exceptions_internal::ConstructorTracker ct(count);
- exceptions_internal::SetCountdown(count);
- try {
- T temp(std::forward<Args>(args)...);
- static_cast<void>(temp);
- break;
- } catch (const exceptions_internal::TestException&) {
- }
- }
-}
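-
-// Illustrative sketch (not part of the header): checking that a copy that
-// throws during vector construction does not leak the elements already
-// constructed:
-//
-//   TestThrowingCtor<std::vector<ThrowingValue<>>>(
-//       2, ThrowingValue<>(5, nothrow_ctor));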
-
-// Tests the nothrow guarantee of the provided nullary operation. If an
-// exception is thrown, the result will be AssertionFailure(). Otherwise, it
-// will be AssertionSuccess().
-template <typename Operation>
-testing::AssertionResult TestNothrowOp(const Operation& operation) {
- struct Cleanup {
- Cleanup() { exceptions_internal::SetCountdown(); }
- ~Cleanup() { exceptions_internal::UnsetCountdown(); }
- } c;
- try {
- operation();
- return testing::AssertionSuccess();
- } catch (const exceptions_internal::TestException&) {
- return testing::AssertionFailure()
- << "TestException thrown during call to operation() when nothrow "
- "guarantee was expected.";
- } catch (...) {
- return testing::AssertionFailure()
- << "Unknown exception thrown during call to operation() when "
- "nothrow guarantee was expected.";
- }
-}
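-
-// Illustrative sketch (not part of the header): asserting that a move
-// construction specified as kNoThrowMove really is nothrow:
-//
-//   ThrowingValue<TypeSpec::kNoThrowMove> v;
-//   EXPECT_TRUE(TestNothrowOp([&v]() {
-//     ThrowingValue<TypeSpec::kNoThrowMove> moved(std::move(v));
-//   }));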
-
-namespace exceptions_internal {
-
-// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
-struct UninitializedT {};
-
-template <typename T>
-class DefaultFactory {
- public:
- explicit DefaultFactory(const T& t) : t_(t) {}
- std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
-
- private:
- T t_;
-};
-
-template <size_t LazyContractsCount, typename LazyFactory,
- typename LazyOperation>
-using EnableIfTestable = typename absl::enable_if_t<
- LazyContractsCount != 0 &&
- !std::is_same<LazyFactory, UninitializedT>::value &&
- !std::is_same<LazyOperation, UninitializedT>::value>;
-
-template <typename Factory = UninitializedT,
- typename Operation = UninitializedT, typename... Contracts>
-class ExceptionSafetyTestBuilder;
-
-} // namespace exceptions_internal
-
-/*
- * Constructs an empty ExceptionSafetyTestBuilder. All
- * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
- * methods return new instances of ExceptionSafetyTestBuilder.
- *
- * In order to test a T for exception safety, a factory for that T, a testable
- * operation, and at least one contract callback returning an assertion
- * result must be applied using the respective methods.
- */
-exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
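-
-// Illustrative sketch (not part of the header): a typical composition of the
-// builder using the With* methods documented below:
-//
-//   auto tester = testing::MakeExceptionSafetyTester()
-//                     .WithInitialValue(ThrowingValue<>())
-//                     .WithContracts(testing::strong_guarantee);
-//   EXPECT_TRUE(tester.Test([](ThrowingValue<>* v) { ++*v; }));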
-
-namespace exceptions_internal {
-template <typename T>
-struct IsUniquePtr : std::false_type {};
-
-template <typename T, typename D>
-struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
-
-template <typename Factory>
-struct FactoryPtrTypeHelper {
- using type = decltype(std::declval<const Factory&>()());
-
- static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
-};
-
-template <typename Factory>
-using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
-
-template <typename Factory>
-using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
-
-template <typename T>
-class ExceptionSafetyTest {
- using Factory = std::function<std::unique_ptr<T>()>;
- using Operation = std::function<void(T*)>;
- using Contract = std::function<AssertionResult(T*)>;
-
- public:
- template <typename... Contracts>
- explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
- const Contracts&... contracts)
- : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
-
- AssertionResult Test() const {
- for (int count = 0;; ++count) {
- exceptions_internal::ConstructorTracker ct(count);
-
- for (const auto& contract : contracts_) {
- auto t_ptr = factory_();
- try {
- SetCountdown(count);
- operation_(t_ptr.get());
- // Unset for the case that the operation throws no exceptions, which
- // would leave the countdown set and break the *next* exception safety
- // test after this one.
- UnsetCountdown();
- return AssertionSuccess();
- } catch (const exceptions_internal::TestException& e) {
- if (!contract(t_ptr.get())) {
- return AssertionFailure() << e.what() << " failed contract check";
- }
- }
- }
- }
- }
-
- private:
- template <typename ContractFn>
- Contract WrapContract(const ContractFn& contract) {
- return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
- }
-
- Contract WrapContract(StrongGuaranteeTagType) {
- return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
- }
-
- Factory factory_;
- Operation operation_;
- std::vector<Contract> contracts_;
-};
-
-/*
- * Builds a tester object that tests if performing an operation on a T follows
- * exception safety guarantees. Verification is done via contract assertion
- * callbacks applied to T instances post-throw.
- *
- * Template parameters for ExceptionSafetyTestBuilder:
- *
- * - Factory: The factory object (passed in via tester.WithFactory(...) or
- * tester.WithInitialValue(...)) must be invocable with the signature
- * `std::unique_ptr<T> operator()() const` where T is the type being tested.
- * It is used for reliably creating identical T instances to test on.
- *
- * - Operation: The operation object (passed in via tester.WithOperation(...)
- * or tester.Test(...)) must be invocable with the signature
- * `void operator()(T*) const` where T is the type being tested. It is used
- * for performing steps on a T instance that may throw and that need to be
- * checked for exception safety. Each call to the operation will receive a
- * fresh T instance so it's free to modify and destroy the T instances as it
- * pleases.
- *
- * - Contracts...: The contract assertion callback objects (passed in via
- * tester.WithContracts(...)) must be invocable with the signature
- * `testing::AssertionResult operator()(T*) const` where T is the type being
- * tested. Contract assertion callbacks are provided T instances post-throw.
- * They must return testing::AssertionSuccess when the type contracts of the
- * provided T instance hold. If the type contracts of the T instance do not
- * hold, they must return testing::AssertionFailure. Execution order of
- * Contracts... is unspecified. They will each individually get a fresh T
- * instance so they are free to modify and destroy the T instances as they
- * please.
- */
-template <typename Factory, typename Operation, typename... Contracts>
-class ExceptionSafetyTestBuilder {
- public:
- /*
- * Returns a new ExceptionSafetyTestBuilder with an included T factory based
- * on the provided T instance. The existing factory will not be included in
- * the newly created tester instance. The created factory returns a new T
- * instance by copy-constructing the provided const T& t.
- *
- * Preconditions for tester.WithInitialValue(const T& t):
- *
- * - The const T& t object must be copy-constructible where T is the type
- * being tested. For non-copy-constructible objects, use the method
- * tester.WithFactory(...).
- */
- template <typename T>
- ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
- WithInitialValue(const T& t) const {
- return WithFactory(DefaultFactory<T>(t));
- }
-
- /*
- * Returns a new ExceptionSafetyTestBuilder with the provided T factory
- * included. The existing factory will not be included in the newly-created
- * tester instance. This method is intended for use with types lacking a copy
- * constructor. Types that can be copy-constructed should instead use the
- * method tester.WithInitialValue(...).
- */
- template <typename NewFactory>
- ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
- WithFactory(const NewFactory& new_factory) const {
- return {new_factory, operation_, contracts_};
- }
-
- /*
- * Returns a new ExceptionSafetyTestBuilder with the provided testable
- * operation included. The existing operation will not be included in the
- * newly created tester.
- */
- template <typename NewOperation>
- ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
- WithOperation(const NewOperation& new_operation) const {
- return {factory_, new_operation, contracts_};
- }
-
- /*
- * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
- * combined with the Contracts... that were already included in the instance
- * on which the method was called. Contracts... cannot be removed or replaced
- * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
- * be created in order to get an empty Contracts... list.
- *
- * In addition to passing in custom contract assertion callbacks, this method
- * accepts `testing::strong_guarantee` as an argument which checks T instances
- * post-throw against freshly created T instances via operator== to verify
- * that any state changes made during the execution of the operation were
- * properly rolled back.
- */
- template <typename... MoreContracts>
- ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
- absl::decay_t<MoreContracts>...>
- WithContracts(const MoreContracts&... more_contracts) const {
- return {
- factory_, operation_,
- std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>(
- more_contracts...))};
- }
-
- /*
- * Returns a testing::AssertionResult that is the reduced result of the
- * exception safety algorithm. The algorithm short-circuits and returns
- * AssertionFailure after the first contract callback returns an
- * AssertionFailure. Otherwise, if all contract callbacks return an
- * AssertionSuccess, the reduced result is AssertionSuccess.
- *
- * The passed-in testable operation will not be saved in a new tester instance
- * nor will it modify/replace the existing tester instance. This is useful
- * when each operation being tested is unique and does not need to be reused.
- *
- * Preconditions for tester.Test(const NewOperation& new_operation):
- *
- * - May only be called after at least one contract assertion callback and a
- * factory or initial value have been provided.
- */
- template <
- typename NewOperation,
- typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
- testing::AssertionResult Test(const NewOperation& new_operation) const {
- return TestImpl(new_operation, absl::index_sequence_for<Contracts...>());
- }
-
- /*
- * Returns a testing::AssertionResult that is the reduced result of the
- * exception safety algorithm. The algorithm short-circuits and returns
- * AssertionFailure after the first contract callback returns an
- * AssertionFailure. Otherwise, if all contract callbacks return an
- * AssertionSuccess, the reduced result is AssertionSuccess.
- *
- * Preconditions for tester.Test():
- *
- * - May only be called after at least one contract assertion callback, a
- * factory or initial value, and a testable operation have been provided.
- */
- template <
- typename LazyOperation = Operation,
- typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
- testing::AssertionResult Test() const {
- return Test(operation_);
- }
-
- private:
- template <typename, typename, typename...>
- friend class ExceptionSafetyTestBuilder;
-
- friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
-
- ExceptionSafetyTestBuilder() {}
-
- ExceptionSafetyTestBuilder(const Factory& f, const Operation& o,
- const std::tuple<Contracts...>& i)
- : factory_(f), operation_(o), contracts_(i) {}
-
- template <typename SelectedOperation, size_t... Indices>
- testing::AssertionResult TestImpl(SelectedOperation selected_operation,
- absl::index_sequence<Indices...>) const {
- return ExceptionSafetyTest<FactoryElementType<Factory>>(
- factory_, selected_operation, std::get<Indices>(contracts_)...)
- .Test();
- }
-
- Factory factory_;
- Operation operation_;
- std::tuple<Contracts...> contracts_;
-};
-
-} // namespace exceptions_internal
-
-} // namespace testing
-
-#endif // ABSL_HAVE_EXCEPTIONS
-
-#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+ }
+
+ template <typename U>
+ ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept {
+ dummy_ = std::move(other.State());
+ return *this;
+ }
+
+ template <typename U>
+ struct rebind {
+ using other = ThrowingAllocator<U, Spec>;
+ };
+
+ pointer allocate(size_type n) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return static_cast<pointer>(::operator new(n * sizeof(T)));
+ }
+
+ pointer allocate(size_type n, const_void_pointer) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ return allocate(n);
+ }
+
+ void deallocate(pointer ptr, size_type) noexcept {
+ ReadState();
+ ::operator delete(static_cast<void*>(ptr));
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* ptr, Args&&... args) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+ }
+
+ template <typename U>
+ void destroy(U* p) noexcept {
+ ReadState();
+ p->~U();
+ }
+
+ size_type max_size() const noexcept {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ ThrowingAllocator select_on_container_copy_construction() noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return *this;
+ }
+
+ template <typename U>
+ bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept {
+ return dummy_ == other.dummy_;
+ }
+
+ template <typename U>
+ bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept {
+ return dummy_ != other.dummy_;
+ }
+
+ template <typename, AllocSpec>
+ friend class ThrowingAllocator;
+
+ private:
+ static std::string GetInstanceString(int dummy) {
+ return absl::StrCat("ThrowingAllocator<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ const std::shared_ptr<const int>& State() const { return dummy_; }
+ std::shared_ptr<const int>& State() { return dummy_; }
+
+ void ReadState() {
+ // we know that this will never be true, but the compiler doesn't, so this
+ // should safely force a read of the value.
+ if (*dummy_ < 0) std::abort();
+ }
+
+ void ReadStateAndMaybeThrow(absl::string_view msg) const {
+ if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ exceptions_internal::MaybeThrow(
+ absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+ }
+ }
+
+ static int next_id_;
+ std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, AllocSpec Spec>
+int ThrowingAllocator<T, Spec>::next_id_ = 0;
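// [Editorial example: not part of the original diff] A sketch of
// ThrowingAllocator plugged into a standard container. The function name and
// container choice are illustrative; AllocSpec::kEverythingThrows is assumed
// to be the "throw from everything" spec defined earlier in this header.
inline void ThrowingAllocatorSketch() {
  // Every allocate()/construct() call participates in the countdown and may
  // throw a TestException, which is what lets containers be probed for leaks
  // at every possible failure point.
  std::vector<int, ThrowingAllocator<int, AllocSpec::kEverythingThrows>> v;
  v.push_back(1);  // may throw from allocate() or construct()
}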
+
+// Tests for resource leaks by attempting to construct a T using args repeatedly
+// until successful, using the countdown method. Side effects can then be
+// tested for resource leaks.
+template <typename T, typename... Args>
+void TestThrowingCtor(Args&&... args) {
+ struct Cleanup {
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+ exceptions_internal::SetCountdown(count);
+ try {
+ T temp(std::forward<Args>(args)...);
+ static_cast<void>(temp);
+ break;
+ } catch (const exceptions_internal::TestException&) {
+ }
+ }
+}
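// [Editorial example: not part of the original diff] A sketch of
// TestThrowingCtor. ThrowingValue is the instrumented value type defined
// earlier in this header (default spec assumed); PairOfThrowers itself is
// illustrative.
struct PairOfThrowers {
  ThrowingValue<> a;
  ThrowingValue<> b;
};

inline void TestThrowingCtorSketch() {
  // Constructs PairOfThrowers with the countdown at 0, 1, 2, ... until a
  // construction finally succeeds; ConstructorTracker then reports any
  // ThrowingValue leaked by a construction that failed partway through.
  TestThrowingCtor<PairOfThrowers>();
}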
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+ struct Cleanup {
+ Cleanup() { exceptions_internal::SetCountdown(); }
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ try {
+ operation();
+ return testing::AssertionSuccess();
+ } catch (const exceptions_internal::TestException&) {
+ return testing::AssertionFailure()
+ << "TestException thrown during call to operation() when nothrow "
+ "guarantee was expected.";
+ } catch (...) {
+ return testing::AssertionFailure()
+ << "Unknown exception thrown during call to operation() when "
+ "nothrow guarantee was expected.";
+ }
+}
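// [Editorial example: not part of the original diff] A sketch of
// TestNothrowOp; the vector swap is an illustrative stand-in for any nullary
// operation. The countdown is armed for the duration of the call, so an
// instrumented operation that is not marked nothrow would throw immediately
// and turn the result into AssertionFailure().
inline void TestNothrowOpSketch() {
  std::vector<int> a{1, 2, 3};
  std::vector<int> b;
  EXPECT_TRUE(TestNothrowOp([&]() { a.swap(b); }));  // swap never throws
}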
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+ explicit DefaultFactory(const T& t) : t_(t) {}
+ std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+ T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+ typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+ LazyContractsCount != 0 &&
+ !std::is_same<LazyFactory, UninitializedT>::value &&
+ !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+ typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+} // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+ using type = decltype(std::declval<const Factory&>()());
+
+ static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+ using Factory = std::function<std::unique_ptr<T>()>;
+ using Operation = std::function<void(T*)>;
+ using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+ template <typename... Contracts>
+ explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+ const Contracts&... contracts)
+ : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+ AssertionResult Test() const {
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+
+ for (const auto& contract : contracts_) {
+ auto t_ptr = factory_();
+ try {
+ SetCountdown(count);
+ operation_(t_ptr.get());
+ // Unset for the case that the operation throws no exceptions, which
+ // would leave the countdown set and break the *next* exception safety
+ // test after this one.
+ UnsetCountdown();
+ return AssertionSuccess();
+ } catch (const exceptions_internal::TestException& e) {
+ if (!contract(t_ptr.get())) {
+ return AssertionFailure() << e.what() << " failed contract check";
+ }
+ }
+ }
+ }
+ }
+
+ private:
+ template <typename ContractFn>
+ Contract WrapContract(const ContractFn& contract) {
+ return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+ }
+
+ Contract WrapContract(StrongGuaranteeTagType) {
+ return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+ }
+
+ Factory factory_;
+ Operation operation_;
+ std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ * tester.WithInitialValue(...)) must be invocable with the signature
+ * `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ * It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ * or tester.Test(...)) must be invocable with the signature
+ * `void operator()(T*) const` where T is the type being tested. It is used
+ * for performing steps on a T instance that may throw and that need to be
+ * checked for exception safety. Each call to the operation will receive a
+ * fresh T instance so it's free to modify and destroy the T instances as it
+ * pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ * tester.WithContracts(...)) must be invocable with the signature
+ * `testing::AssertionResult operator()(T*) const` where T is the type being
+ * tested. Contract assertion callbacks are provided T instances post-throw.
+ * They must return testing::AssertionSuccess when the type contracts of the
+ * provided T instance hold. If the type contracts of the T instance do not
+ * hold, they must return testing::AssertionFailure. Execution order of
+ * Contracts... is unspecified. They will each individually get a fresh T
+ * instance so they are free to modify and destroy the T instances as they
+ * please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+ * on the provided T instance. The existing factory will not be included in
+ * the newly created tester instance. The created factory returns a new T
+ * instance by copy-constructing the provided const T& t.
+ *
+ * Preconditions for tester.WithInitialValue(const T& t):
+ *
+ * - The const T& t object must be copy-constructible where T is the type
+ * being tested. For non-copy-constructible objects, use the method
+ * tester.WithFactory(...).
+ */
+ template <typename T>
+ ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+ WithInitialValue(const T& t) const {
+ return WithFactory(DefaultFactory<T>(t));
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+ * included. The existing factory will not be included in the newly-created
+ * tester instance. This method is intended for use with types lacking a copy
+ * constructor. Types that can be copy-constructed should instead use the
+ * method tester.WithInitialValue(...).
+ */
+ template <typename NewFactory>
+ ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
+ WithFactory(const NewFactory& new_factory) const {
+ return {new_factory, operation_, contracts_};
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided testable
+ * operation included. The existing operation will not be included in the
+ * newly created tester.
+ */
+ template <typename NewOperation>
+ ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
+ WithOperation(const NewOperation& new_operation) const {
+ return {factory_, new_operation, contracts_};
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+ * combined with the Contracts... that were already included in the instance
+ * on which the method was called. Contracts... cannot be removed or replaced
+ * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+ * be created in order to get an empty Contracts... list.
+ *
+ * In addition to passing in custom contract assertion callbacks, this method
+ * accepts `testing::strong_guarantee` as an argument which checks T instances
+ * post-throw against freshly created T instances via operator== to verify
+ * that any state changes made during the execution of the operation were
+ * properly rolled back.
+ */
+ template <typename... MoreContracts>
+ ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
+ absl::decay_t<MoreContracts>...>
+ WithContracts(const MoreContracts&... more_contracts) const {
+ return {
+ factory_, operation_,
+ std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>(
+ more_contracts...))};
+ }
+
+ /*
+ * Returns a testing::AssertionResult that is the reduced result of the
+ * exception safety algorithm. The algorithm short-circuits and returns
+ * AssertionFailure after the first contract callback returns an
+ * AssertionFailure. Otherwise, if all contract callbacks return an
+ * AssertionSuccess, the reduced result is AssertionSuccess.
+ *
+ * The passed-in testable operation will not be saved in a new tester instance
+ * nor will it modify/replace the existing tester instance. This is useful
+ * when each operation being tested is unique and does not need to be reused.
+ *
+ * Preconditions for tester.Test(const NewOperation& new_operation):
+ *
+ * - May only be called after at least one contract assertion callback and a
+ * factory or initial value have been provided.
+ */
+ template <
+ typename NewOperation,
+ typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
+ testing::AssertionResult Test(const NewOperation& new_operation) const {
+ return TestImpl(new_operation, absl::index_sequence_for<Contracts...>());
+ }
+
+ /*
+ * Returns a testing::AssertionResult that is the reduced result of the
+ * exception safety algorithm. The algorithm short-circuits and returns
+ * AssertionFailure after the first contract callback returns an
+ * AssertionFailure. Otherwise, if all contract callbacks return an
+ * AssertionSuccess, the reduced result is AssertionSuccess.
+ *
+ * Preconditions for tester.Test():
+ *
+ * - May only be called after at least one contract assertion callback, a
+ * factory or initial value, and a testable operation have been provided.
+ */
+ template <
+ typename LazyOperation = Operation,
+ typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
+ testing::AssertionResult Test() const {
+ return Test(operation_);
+ }
+
+ private:
+ template <typename, typename, typename...>
+ friend class ExceptionSafetyTestBuilder;
+
+ friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
+
+ ExceptionSafetyTestBuilder() {}
+
+ ExceptionSafetyTestBuilder(const Factory& f, const Operation& o,
+ const std::tuple<Contracts...>& i)
+ : factory_(f), operation_(o), contracts_(i) {}
+
+ template <typename SelectedOperation, size_t... Indices>
+ testing::AssertionResult TestImpl(SelectedOperation selected_operation,
+ absl::index_sequence<Indices...>) const {
+ return ExceptionSafetyTest<FactoryElementType<Factory>>(
+ factory_, selected_operation, std::get<Indices>(contracts_)...)
+ .Test();
+ }
+
+ Factory factory_;
+ Operation operation_;
+ std::tuple<Contracts...> contracts_;
+};
+
+} // namespace exceptions_internal
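// [Editorial example: not part of the original diff] A usage sketch for the
// MakeExceptionSafetyTester() builder chain documented above. `Widget`, its
// contract lambda, and the operation are illustrative stand-ins.
struct Widget {
  int value = 0;
  bool operator==(const Widget& other) const { return value == other.value; }
};

inline void ExceptionSafetyTesterSketch() {
  auto tester = MakeExceptionSafetyTester()
                    .WithInitialValue(Widget{})  // copy-constructible seed
                    .WithContracts(
                        // Custom contract: checked on post-throw instances.
                        [](Widget* w) {
                          return AssertionResult(w->value >= 0);
                        },
                        // Built-in contract: post-throw state must equal a
                        // freshly constructed copy (rollback happened).
                        strong_guarantee);
  // The operation below never throws a TestException, so Test() succeeds on
  // the first countdown; instrumented members would exercise later counts.
  EXPECT_TRUE(tester.Test([](Widget* w) { ++w->value; }));
}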
+
+} // namespace testing
+
+#endif // ABSL_HAVE_EXCEPTIONS
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/exception_testing.h b/contrib/restricted/abseil-cpp/absl/base/internal/exception_testing.h
index fb40da49a6..01b5465571 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/exception_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/exception_testing.h
@@ -1,42 +1,42 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Testing utilities for ABSL types which throw exceptions.
-
-#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
-#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
-
-#include "gtest/gtest.h"
-#include "absl/base/config.h"
-
-// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
-// if exceptions are enabled, or for death with a specified text in the error
-// message
-#ifdef ABSL_HAVE_EXCEPTIONS
-
-#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
- EXPECT_THROW(expr, exception_t)
-
-#elif defined(__ANDROID__)
-// Android asserts do not log anywhere that gtest can currently inspect.
-// So we expect exit, but cannot match the message.
-#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
- EXPECT_DEATH(expr, ".*")
-#else
-#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
- EXPECT_DEATH_IF_SUPPORTED(expr, text)
-
-#endif
-
-#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_THROW(expr, exception_t)
+
+#elif defined(__ANDROID__)
+// Android asserts do not log anywhere that gtest can currently inspect.
+// So we expect exit, but cannot match the message.
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_DEATH(expr, ".*")
+#else
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+ EXPECT_DEATH_IF_SUPPORTED(expr, text)
+
+#endif
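// [Editorial example: not part of the original diff] Typical use of the
// macro above. CheckedOp and its message are illustrative, and the sketch
// assumes an exceptions-enabled build; with exceptions disabled, the checked
// operation would have to abort with "negative input" in its error output
// for the death-test branches to pass.
#include <stdexcept>

inline void CheckedOp(int i) {
  if (i < 0) throw std::logic_error("negative input");
}

TEST(ExceptionTestingSketch, RejectsNegativeInput) {
  ABSL_BASE_INTERNAL_EXPECT_FAIL(CheckedOp(-1), std::logic_error,
                                 "negative input");
}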
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/hide_ptr.h b/contrib/restricted/abseil-cpp/absl/base/internal/hide_ptr.h
index 4928719a98..1dba80909a 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/hide_ptr.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/hide_ptr.h
@@ -1,51 +1,51 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
-#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
-
-#include <cstdint>
-
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// Arbitrary value with high bits set. Xor'ing with it is unlikely
-// to map one valid pointer to another valid pointer.
-constexpr uintptr_t HideMask() {
- return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
-}
-
-// Hide a pointer from the leak checker. For internal use only.
-// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
-// and all objects reachable from ptr to be ignored by the leak checker.
-template <class T>
-inline uintptr_t HidePtr(T* ptr) {
- return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
-}
-
-// Return a pointer that has been hidden from the leak checker.
-// For internal use only.
-template <class T>
-inline T* UnhidePtr(uintptr_t hidden) {
- return reinterpret_cast<T*>(hidden ^ HideMask());
-}
-
-} // namespace base_internal
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+ return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+ return reinterpret_cast<T*>(hidden ^ HideMask());
+}
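// [Editorial example: not part of the original diff] XOR with HideMask() is
// an involution, so a hide/unhide round trip recovers the original pointer;
// the helper below is an illustrative sketch.
template <class T>
inline bool HideRoundTripsSketch(T* ptr) {
  uintptr_t hidden = HidePtr(ptr);     // no longer looks like a pointer
  return UnhidePtr<T>(hidden) == ptr;  // always true: (p ^ m) ^ m == p
}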
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/identity.h b/contrib/restricted/abseil-cpp/absl/base/internal/identity.h
index b7314454f1..a3154ed7bc 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/identity.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/identity.h
@@ -1,37 +1,37 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
-#define ABSL_BASE_INTERNAL_IDENTITY_H_
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace internal {
-
-template <typename T>
-struct identity {
- typedef T type;
-};
-
-template <typename T>
-using identity_t = typename identity<T>::type;
-
-} // namespace internal
+namespace internal {
+
+template <typename T>
+struct identity {
+ typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
+
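// [Editorial example: not part of the original diff] identity_t places T in
// a non-deduced context, so callers must name T explicitly; ClampToZero is
// an illustrative sketch.
template <typename T>
T ClampToZero(identity_t<T> v) {
  return v < T{} ? T{} : v;  // T is never deduced from `v`
}
// ClampToZero(-3); // would not compile: T cannot be deduced
// ClampToZero<long>(-3); // OK: -3 converts to long; returns 0L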
+} // namespace internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_IDENTITY_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h
index eccce68fe7..130d8c2476 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable.h
@@ -1,107 +1,107 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
-#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
-
-#include <type_traits>
-
-#include "absl/base/internal/identity.h"
-
-// File:
-// This file defines a macro that allows the creation or emulation of C++17
-// inline variables, depending on whether the feature is supported.
-
-////////////////////////////////////////////////////////////////////////////////
-// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
-//
-// Description:
-// Expands to the equivalent of an inline constexpr instance of the specified
-// `type` and `name`, initialized to the value `init`. If the compiler being
-// used is detected as supporting actual inline variables as a language
-// feature, then the macro expands to an actual inline variable definition.
-//
-// Requires:
-// `type` is a type that is usable in an extern variable declaration.
-//
-// Requires: `name` is a valid identifier
-//
-// Requires:
-// `init` is an expression that can be used in the following definition:
-// constexpr type name = init;
-//
-// Usage:
-//
-// // Equivalent to: `inline constexpr size_t variant_npos = -1;`
-// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
-//
-// Differences in implementation:
-// For a direct, language-level inline variable, decltype(name) will be the
-// type that was specified along with const qualification, whereas for
-// emulated inline variables, decltype(name) may be different (in practice
-// it will likely be a reference type).
-////////////////////////////////////////////////////////////////////////////////
-
-#ifdef __cpp_inline_variables
-
-// Clang's -Wmissing-variable-declarations option erroneously warned that
-// inline constexpr objects need to be pre-declared. This has now been fixed,
-// but we will need to support this workaround for people building with older
-// versions of clang.
-//
-// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
-//
-// Note:
-// identity_t is used here so that the const and name are in the
-// appropriate place for pointer types, reference types, function pointer
-// types, etc.
-#if defined(__clang__)
-#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
- extern const ::absl::internal::identity_t<type> name;
-#else // Otherwise, just define the macro to do nothing.
-#define ABSL_INTERNAL_EXTERN_DECL(type, name)
-#endif // defined(__clang__)
-
-// See above comment at top of file for details.
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
- ABSL_INTERNAL_EXTERN_DECL(type, name) \
- inline constexpr ::absl::internal::identity_t<type> name = init
-
-#else
-
-// See above comment at top of file for details.
-//
-// Note:
-// identity_t is used here so that the const and name are in the
-// appropriate place for pointer types, reference types, function pointer
-// types, etc.
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
- template <class /*AbslInternalDummy*/ = void> \
- struct AbslInternalInlineVariableHolder##name { \
- static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
- }; \
- \
- template <class AbslInternalDummy> \
- constexpr ::absl::internal::identity_t<var_type> \
- AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
- \
- static constexpr const ::absl::internal::identity_t<var_type>& \
- name = /* NOLINT */ \
- AbslInternalInlineVariableHolder##name<>::kInstance; \
- static_assert(sizeof(void (*)(decltype(name))) != 0, \
- "Silence unused variable warnings.")
-
-#endif // __cpp_inline_variables
-
-#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+// This file defines a macro that allows the creation or emulation of C++17
+// inline variables, depending on whether the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+// Expands to the equivalent of an inline constexpr instance of the specified
+// `type` and `name`, initialized to the value `init`. If the compiler being
+// used is detected as supporting actual inline variables as a language
+// feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+// `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+// `init` is an expression that can be used in the following definition:
+// constexpr type name = init;
+//
+// Usage:
+//
+// // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+// For a direct, language-level inline variable, decltype(name) will be the
+// type that was specified along with const qualification, whereas for
+// emulated inline variables, decltype(name) may be different (in practice
+// it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+// identity_t is used here so that the const and name are in the
+// appropriate place for pointer types, reference types, function pointer
+// types, etc.
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+ extern const ::absl::internal::identity_t<type> name;
+#else // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+ ABSL_INTERNAL_EXTERN_DECL(type, name) \
+ inline constexpr ::absl::internal::identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+// identity_t is used here so that the const and name are in the
+// appropriate place for pointer types, reference types, function pointer
+// types, etc.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
+ template <class /*AbslInternalDummy*/ = void> \
+ struct AbslInternalInlineVariableHolder##name { \
+ static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
+ }; \
+ \
+ template <class AbslInternalDummy> \
+ constexpr ::absl::internal::identity_t<var_type> \
+ AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
+ \
+ static constexpr const ::absl::internal::identity_t<var_type>& \
+ name = /* NOLINT */ \
+ AbslInternalInlineVariableHolder##name<>::kInstance; \
+ static_assert(sizeof(void (*)(decltype(name))) != 0, \
+ "Silence unused variable warnings.")
+
+#endif // __cpp_inline_variables
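// [Editorial example: not part of the original diff] The decltype difference
// documented above, made concrete; kSketchAnswer is illustrative.
ABSL_INTERNAL_INLINE_CONSTEXPR(int, kSketchAnswer, 42);
static_assert(kSketchAnswer == 42, "same observable value on either branch");
// decltype(kSketchAnswer) is `const int` with language-level inline
// variables, but `const int&` (a reference into the holder template) under
// the emulation branch above.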
+
+#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h
index 7e72bbe266..3856b9f80f 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/inline_variable_testing.h
@@ -1,46 +1,46 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
-#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
-
-#include "absl/base/internal/inline_variable.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace inline_variable_testing_internal {
-
-struct Foo {
- int value = 5;
-};
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
-ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
-ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
-
-ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
-
-const Foo& get_foo_a();
-const Foo& get_foo_b();
-
-const int& get_int_a();
-const int& get_int_b();
-
-} // namespace inline_variable_testing_internal
+namespace inline_variable_testing_internal {
+
+struct Foo {
+ int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+} // namespace inline_variable_testing_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h b/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h
index 14e80afa0a..5c71f32823 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/invoke.h
@@ -1,187 +1,187 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
// absl::base_internal::invoke(f, args...) is an implementation of
-// INVOKE(f, args...) from section [func.require] of the C++ standard.
-//
-// [func.require]
-// Define INVOKE (f, t1, t2, ..., tN) as follows:
-// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
-// and t1 is an object of type T or a reference to an object of type T or a
-// reference to an object of a type derived from T;
-// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
-// class T and t1 is not one of the types described in the previous item;
-// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
-// an object of type T or a reference to an object of type T or a reference
-// to an object of a type derived from T;
-// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
-// is not one of the types described in the previous item;
-// 5. f(t1, t2, ..., tN) in all other cases.
-//
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
// The implementation is SFINAE-friendly: substitution failure within invoke()
-// isn't an error.
-
-#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
-#define ABSL_BASE_INTERNAL_INVOKE_H_
-
-#include <algorithm>
-#include <type_traits>
-#include <utility>
-
-#include "absl/meta/type_traits.h"
-
-// The following code is an internal implementation detail. See the comment at
-// top of this file for the API documentation.
-
-namespace absl {
+// isn't an error.
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+// The following code is an internal implementation detail. See the comment at
+// top of this file for the API documentation.
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// The five classes below each implement one of the clauses from the definition
-// of INVOKE. The inner class template Accept<F, Args...> checks whether the
-// clause is applicable; static function template Invoke(f, args...) does the
-// invocation.
-//
-// By separating the clause selection logic from invocation we make sure that
-// Invoke() does exactly what the standard says.
-
-template <typename Derived>
-struct StrippedAccept {
- template <typename... Args>
- struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
- typename std::remove_reference<Args>::type>::type...> {};
-};
-
-// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
-// and t1 is an object of type T or a reference to an object of type T or a
-// reference to an object of a type derived from T.
-struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename MemFunType, typename C, typename Obj, typename... Args>
- struct AcceptImpl<MemFunType C::*, Obj, Args...>
- : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
- absl::is_function<MemFunType>::value> {
- };
-
- template <typename MemFun, typename Obj, typename... Args>
- static decltype((std::declval<Obj>().*
- std::declval<MemFun>())(std::declval<Args>()...))
- Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
- return (std::forward<Obj>(obj).*
- std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
- }
-};
-
-// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
-// class T and t1 is not one of the types described in the previous item.
-struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename MemFunType, typename C, typename Ptr, typename... Args>
- struct AcceptImpl<MemFunType C::*, Ptr, Args...>
- : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
- absl::is_function<MemFunType>::value> {
- };
-
- template <typename MemFun, typename Ptr, typename... Args>
- static decltype(((*std::declval<Ptr>()).*
- std::declval<MemFun>())(std::declval<Args>()...))
- Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
- return ((*std::forward<Ptr>(ptr)).*
- std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
- }
-};
-
-// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
-// an object of type T or a reference to an object of type T or a reference
-// to an object of a type derived from T.
-struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename R, typename C, typename Obj>
- struct AcceptImpl<R C::*, Obj>
- : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
- !absl::is_function<R>::value> {};
-
- template <typename DataMem, typename Ref>
- static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
- DataMem&& data_mem, Ref&& ref) {
- return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
- }
-};
-
-// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
-// is not one of the types described in the previous item.
-struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
- template <typename... Args>
- struct AcceptImpl : std::false_type {};
-
- template <typename R, typename C, typename Ptr>
- struct AcceptImpl<R C::*, Ptr>
- : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
- !absl::is_function<R>::value> {};
-
- template <typename DataMem, typename Ptr>
- static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
- DataMem&& data_mem, Ptr&& ptr) {
- return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
- }
-};
-
-// f(t1, t2, ..., tN) in all other cases.
-struct Callable {
- // Callable doesn't have Accept because it's the last clause that gets picked
- // when none of the previous clauses are applicable.
- template <typename F, typename... Args>
- static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
- F&& f, Args&&... args) {
- return std::forward<F>(f)(std::forward<Args>(args)...);
- }
-};
-
-// Resolves to the first matching clause.
-template <typename... Args>
-struct Invoker {
- typedef typename std::conditional<
- MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
- typename std::conditional<
- MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
- typename std::conditional<
- DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
- typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
- DataMemAndPtr, Callable>::type>::type>::
- type>::type type;
-};
-
-// The result type of Invoke<F, Args...>.
-template <typename F, typename... Args>
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+ template <typename... Args>
+ struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+ typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename MemFunType, typename C, typename Obj, typename... Args>
+ struct AcceptImpl<MemFunType C::*, Obj, Args...>
+ : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+ absl::is_function<MemFunType>::value> {
+ };
+
+ template <typename MemFun, typename Obj, typename... Args>
+ static decltype((std::declval<Obj>().*
+ std::declval<MemFun>())(std::declval<Args>()...))
+ Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+ return (std::forward<Obj>(obj).*
+ std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+ }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename MemFunType, typename C, typename Ptr, typename... Args>
+ struct AcceptImpl<MemFunType C::*, Ptr, Args...>
+ : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+ absl::is_function<MemFunType>::value> {
+ };
+
+ template <typename MemFun, typename Ptr, typename... Args>
+ static decltype(((*std::declval<Ptr>()).*
+ std::declval<MemFun>())(std::declval<Args>()...))
+ Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+ return ((*std::forward<Ptr>(ptr)).*
+ std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+ }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename R, typename C, typename Obj>
+ struct AcceptImpl<R C::*, Obj>
+ : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+ !absl::is_function<R>::value> {};
+
+ template <typename DataMem, typename Ref>
+ static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+ DataMem&& data_mem, Ref&& ref) {
+ return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+ }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+ template <typename... Args>
+ struct AcceptImpl : std::false_type {};
+
+ template <typename R, typename C, typename Ptr>
+ struct AcceptImpl<R C::*, Ptr>
+ : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+ !absl::is_function<R>::value> {};
+
+ template <typename DataMem, typename Ptr>
+ static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+ DataMem&& data_mem, Ptr&& ptr) {
+ return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+ }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+ // Callable doesn't have Accept because it's the last clause that gets picked
+ // when none of the previous clauses are applicable.
+ template <typename F, typename... Args>
+ static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+ F&& f, Args&&... args) {
+ return std::forward<F>(f)(std::forward<Args>(args)...);
+ }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+ typedef typename std::conditional<
+ MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+ typename std::conditional<
+ MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+ typename std::conditional<
+ DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+ typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+ DataMemAndPtr, Callable>::type>::type>::
+ type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
- std::declval<F>(), std::declval<Args>()...));
-
-// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
-// [func.require] of the C++ standard.
-template <typename F, typename... Args>
+ std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
- return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
- std::forward<Args>(args)...);
-}
-} // namespace base_internal
+ return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+ std::forward<Args>(args)...);
+}
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_INVOKE_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_INVOKE_H_
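As a quick illustration of the restored invoke.h, each call of base_internal::invoke() resolves through Invoker to the first clause whose Accept matches. The minimal sketch below exercises all five clauses; the Point and Add types are hypothetical and exist only for illustration, while the header path and the absl::base_internal::invoke() entry point are exactly those shown in the diff above:

#include <cassert>

#include "absl/base/internal/invoke.h"

// Hypothetical test types; only here to trigger each INVOKE clause.
struct Point {
  int x;
  int Doubled() const { return 2 * x; }
};

int Add(int a, int b) { return a + b; }

int main() {
  Point p{21};
  Point* pp = &p;
  // MemFunAndRef: (t1.*f)(t2, ..., tN) with an object or reference.
  assert(absl::base_internal::invoke(&Point::Doubled, p) == 42);
  // MemFunAndPtr: ((*t1).*f)(t2, ..., tN) with a pointer-like t1.
  assert(absl::base_internal::invoke(&Point::Doubled, pp) == 42);
  // DataMemAndRef: t1.*f with a pointer to member data.
  assert(absl::base_internal::invoke(&Point::x, p) == 21);
  // DataMemAndPtr: (*t1).*f with a pointer to member data.
  assert(absl::base_internal::invoke(&Point::x, pp) == 21);
  // Callable: f(t1, ..., tN) in all other cases.
  assert(absl::base_internal::invoke(Add, 40, 2) == 42);
  return 0;
}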
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
index ad5ead43db..229ab9162d 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.cc
@@ -1,620 +1,620 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A low-level allocator that can be used by other low-level
-// modules without introducing dependency cycles.
-// This allocator is slow and wasteful of memory;
-// it should not be used when performance is key.
-
-#include "absl/base/internal/low_level_alloc.h"
-
-#include <type_traits>
-
-#include "absl/base/call_once.h"
-#include "absl/base/config.h"
-#include "absl/base/internal/direct_mmap.h"
-#include "absl/base/internal/scheduling_mode.h"
-#include "absl/base/macros.h"
-#include "absl/base/thread_annotations.h"
-
-// LowLevelAlloc requires that the platform support low-level
-// allocation of virtual memory. Platforms lacking this cannot use
-// LowLevelAlloc.
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#ifndef _WIN32
-#include <pthread.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#else
-#include <windows.h>
-#endif
-
-#include <string.h>
-#include <algorithm>
-#include <atomic>
-#include <cerrno>
-#include <cstddef>
-#include <new> // for placement-new
-
-#include "absl/base/dynamic_annotations.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-
-// MAP_ANONYMOUS
-#if defined(__APPLE__)
-// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
-// deprecated. In Darwin, MAP_ANON is all there is.
-#if !defined MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif // !MAP_ANONYMOUS
-#endif // __APPLE__
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new> // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. In Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif // !MAP_ANONYMOUS
+#endif // __APPLE__
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// A first-fit allocator with amortized logarithmic free() time.
-
-// ---------------------------------------------------------------------------
-static const int kMaxLevel = 30;
-
-namespace {
-// This struct describes one allocated block, or one free block.
-struct AllocList {
- struct Header {
- // Size of entire region, including this field. Must be
- // first. Valid in both allocated and unallocated blocks.
- uintptr_t size;
-
- // kMagicAllocated or kMagicUnallocated xor this.
- uintptr_t magic;
-
- // Pointer to parent arena.
- LowLevelAlloc::Arena *arena;
-
- // Aligns regions to 0 mod 2*sizeof(void*).
- void *dummy_for_alignment;
- } header;
-
- // Next two fields: in unallocated blocks: freelist skiplist data
- // in allocated blocks: overlaps with client data
-
- // Levels in skiplist used.
- int levels;
-
- // Actually has levels elements. The AllocList node may not have room
- // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
- AllocList *next[kMaxLevel];
-};
-} // namespace
-
-// ---------------------------------------------------------------------------
-// A trivial skiplist implementation. This is used to keep the freelist
-// in address order while taking only logarithmic time per insert and delete.
-
-// An integer approximation of log2(size/base)
-// Requires size >= base.
-static int IntLog2(size_t size, size_t base) {
- int result = 0;
- for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
- result++;
- }
- // floor(size / 2**result) <= base < floor(size / 2**(result-1))
- // => log2(size/(base+1)) <= result < 1+log2(size/base)
- // => result ~= log2(size/base)
- return result;
-}
-
-// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
-static int Random(uint32_t *state) {
- uint32_t r = *state;
- int result = 1;
- while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
- result++;
- }
- *state = r;
- return result;
-}
-
-// Return a number of skiplist levels for a node of size bytes, where
-// base is the minimum node size. Compute level=log2(size / base)+n
-// where n is 1 if random is null and otherwise a random number generated with
-// the standard distribution for a skiplist: See Random() above.
-// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
-// term, so first-fit searches touch fewer nodes. "level" is clipped so
-// level<kMaxLevel and next[level-1] will fit in the node.
-// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
-static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
- // max_fit is the maximum number of levels that will fit in a node for the
- // given size. We can't return more than max_fit, no matter what the
- // random number generator says.
- size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
- int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
- if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
- if (level > kMaxLevel-1) level = kMaxLevel - 1;
- ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
- return level;
-}
-
-// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
-// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
-// points to the last element at level i in the AllocList less than *e, or is
-// head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
- AllocList *e, AllocList **prev) {
- AllocList *p = head;
- for (int level = head->levels - 1; level >= 0; level--) {
- for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
- }
- prev[level] = p;
- }
- return (head->levels == 0) ? nullptr : prev[0]->next[0];
-}
-
-// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
-// Requires that e->levels be previously set by the caller (using
-// LLA_SkiplistLevels())
-static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
- AllocList **prev) {
- LLA_SkiplistSearch(head, e, prev);
- for (; head->levels < e->levels; head->levels++) { // extend prev pointers
- prev[head->levels] = head; // to all *e's levels
- }
- for (int i = 0; i != e->levels; i++) { // add element to list
- e->next[i] = prev[i]->next[i];
- prev[i]->next[i] = e;
- }
-}
-
-// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
-// Requires that e->levels be previously set by the caller (using
-// LLA_SkiplistLevels())
-static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
- AllocList **prev) {
- AllocList *found = LLA_SkiplistSearch(head, e, prev);
- ABSL_RAW_CHECK(e == found, "element not in freelist");
- for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
- prev[i]->next[i] = e->next[i];
- }
- while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
- head->levels--; // reduce head->levels if level unused
- }
-}
-
-// ---------------------------------------------------------------------------
-// Arena implementation
-
-// Metadata for a LowLevelAlloc arena instance.
-struct LowLevelAlloc::Arena {
- // Constructs an arena with the given LowLevelAlloc flags.
- explicit Arena(uint32_t flags_value);
-
- base_internal::SpinLock mu;
- // Head of free list, sorted by address
- AllocList freelist ABSL_GUARDED_BY(mu);
- // Count of allocated blocks
- int32_t allocation_count ABSL_GUARDED_BY(mu);
- // flags passed to NewArena
- const uint32_t flags;
- // Result of sysconf(_SC_PAGESIZE)
- const size_t pagesize;
- // Lowest power of two >= max(16, sizeof(AllocList::Header))
- const size_t round_up;
- // Smallest allocation block size
- const size_t min_size;
- // PRNG state
- uint32_t random ABSL_GUARDED_BY(mu);
-};
-
-namespace {
-// Static storage space for the lazily-constructed, default global arena
-// instances. We require this space because the whole point of LowLevelAlloc
-// is to avoid relying on malloc/new.
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+ struct Header {
+ // Size of entire region, including this field. Must be
+ // first. Valid in both allocated and unallocated blocks.
+ uintptr_t size;
+
+ // kMagicAllocated or kMagicUnallocated xor this.
+ uintptr_t magic;
+
+ // Pointer to parent arena.
+ LowLevelAlloc::Arena *arena;
+
+ // Aligns regions to 0 mod 2*sizeof(void*).
+ void *dummy_for_alignment;
+ } header;
+
+ // Next two fields: in unallocated blocks: freelist skiplist data
+ // in allocated blocks: overlaps with client data
+
+ // Levels in skiplist used.
+ int levels;
+
+ // Actually has levels elements. The AllocList node may not have room
+ // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+ AllocList *next[kMaxLevel];
+};
+} // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation. This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+ int result = 0;
+ for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
+ result++;
+ }
+ // floor(size / 2**result) <= base < floor(size / 2**(result-1))
+ // => log2(size/(base+1)) <= result < 1+log2(size/base)
+ // => result ~= log2(size/base)
+ return result;
+}
+
+// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+ uint32_t r = *state;
+ int result = 1;
+ while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+ result++;
+ }
+ *state = r;
+ return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size. Compute level=log2(size / base)+n
+// where n is 1 if random is null and otherwise a random number generated with
+// the standard distribution for a skiplist: See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes. "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+ // max_fit is the maximum number of levels that will fit in a node for the
+ // given size. We can't return more than max_fit, no matter what the
+ // random number generator says.
+ size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+ int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+ if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+ if (level > kMaxLevel-1) level = kMaxLevel - 1;
+ ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+ return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+ AllocList *e, AllocList **prev) {
+ AllocList *p = head;
+ for (int level = head->levels - 1; level >= 0; level--) {
+ for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+ }
+ prev[level] = p;
+ }
+ return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+ AllocList **prev) {
+ LLA_SkiplistSearch(head, e, prev);
+ for (; head->levels < e->levels; head->levels++) { // extend prev pointers
+ prev[head->levels] = head; // to all *e's levels
+ }
+ for (int i = 0; i != e->levels; i++) { // add element to list
+ e->next[i] = prev[i]->next[i];
+ prev[i]->next[i] = e;
+ }
+}
+
+// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+ AllocList **prev) {
+ AllocList *found = LLA_SkiplistSearch(head, e, prev);
+ ABSL_RAW_CHECK(e == found, "element not in freelist");
+ for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+ prev[i]->next[i] = e->next[i];
+ }
+ while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+ head->levels--; // reduce head->levels if level unused
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for a LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+ // Constructs an arena with the given LowLevelAlloc flags.
+ explicit Arena(uint32_t flags_value);
+
+ base_internal::SpinLock mu;
+ // Head of free list, sorted by address
+ AllocList freelist ABSL_GUARDED_BY(mu);
+ // Count of allocated blocks
+ int32_t allocation_count ABSL_GUARDED_BY(mu);
+ // flags passed to NewArena
+ const uint32_t flags;
+ // Result of sysconf(_SC_PAGESIZE)
+ const size_t pagesize;
+ // Lowest power of two >= max(16, sizeof(AllocList::Header))
+ const size_t round_up;
+ // Smallest allocation block size
+ const size_t min_size;
+ // PRNG state
+ uint32_t random ABSL_GUARDED_BY(mu);
+};
+
+namespace {
+// Static storage space for the lazily-constructed, default global arena
+// instances. We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
LowLevelAlloc::Arena)];
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
[sizeof(LowLevelAlloc::Arena)];
-#endif
-
-// We must use LowLevelCallOnce here to construct the global arenas, rather than
-// using function-level statics, to avoid recursively invoking the scheduler.
-absl::once_flag create_globals_once;
-
-void CreateGlobalArenas() {
- new (&default_arena_storage)
- LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
- new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- new (&unhooked_async_sig_safe_arena_storage)
- LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
-#endif
-}
-
-// Returns a global arena that does not call into hooks. Used by NewArena()
-// when kCallMallocHook is not set.
-LowLevelAlloc::Arena* UnhookedArena() {
- base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
-}
-
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-// Returns a global arena that is async-signal safe. Used by NewArena() when
-// kAsyncSignalSafe is set.
-LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
- base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena *>(
- &unhooked_async_sig_safe_arena_storage);
-}
-#endif
-
-} // namespace
-
-// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
-LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
- base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
-}
-
-// magic numbers to identify allocated and unallocated blocks
-static const uintptr_t kMagicAllocated = 0x4c833e95U;
-static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
-
-namespace {
-class ABSL_SCOPED_LOCKABLE ArenaLock {
- public:
- explicit ArenaLock(LowLevelAlloc::Arena *arena)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
- : arena_(arena) {
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
- sigset_t all;
- sigfillset(&all);
- mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
- }
-#endif
- arena_->mu.Lock();
- }
- ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
- void Leave() ABSL_UNLOCK_FUNCTION() {
- arena_->mu.Unlock();
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if (mask_valid_) {
- const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
- }
- }
-#endif
- left_ = true;
- }
-
- private:
- bool left_ = false; // whether left region
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- bool mask_valid_ = false;
- sigset_t mask_; // old mask of blocked signals
-#endif
- LowLevelAlloc::Arena *arena_;
- ArenaLock(const ArenaLock &) = delete;
- ArenaLock &operator=(const ArenaLock &) = delete;
-};
-} // namespace
-
-// create an appropriate magic number for an object at "ptr"
-// "magic" should be kMagicAllocated or kMagicUnallocated
-inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
- return magic ^ reinterpret_cast<uintptr_t>(ptr);
-}
-
-namespace {
-size_t GetPageSize() {
-#ifdef _WIN32
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
-#elif defined(__wasm__) || defined(__asmjs__)
- return getpagesize();
-#else
- return sysconf(_SC_PAGESIZE);
-#endif
-}
-
-size_t RoundedUpBlockSize() {
- // Round up block sizes to a power of two close to the header size.
- size_t round_up = 16;
- while (round_up < sizeof(AllocList::Header)) {
- round_up += round_up;
- }
- return round_up;
-}
-
-} // namespace
-
-LowLevelAlloc::Arena::Arena(uint32_t flags_value)
- : mu(base_internal::SCHEDULE_KERNEL_ONLY),
- allocation_count(0),
- flags(flags_value),
- pagesize(GetPageSize()),
- round_up(RoundedUpBlockSize()),
- min_size(2 * round_up),
- random(0) {
- freelist.header.size = 0;
- freelist.header.magic =
- Magic(kMagicUnallocated, &freelist.header);
- freelist.header.arena = this;
- freelist.levels = 0;
- memset(freelist.next, 0, sizeof(freelist.next));
-}
-
-// L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
- Arena *meta_data_arena = DefaultArena();
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
- meta_data_arena = UnhookedAsyncSigSafeArena();
- } else // NOLINT(readability/braces)
-#endif
- if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
- meta_data_arena = UnhookedArena();
- }
- Arena *result =
- new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
- return result;
-}
-
-// L < arena->mu, L < arena->arena->mu
-bool LowLevelAlloc::DeleteArena(Arena *arena) {
- ABSL_RAW_CHECK(
- arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
- "may not delete default arena");
- ArenaLock section(arena);
- if (arena->allocation_count != 0) {
- section.Leave();
- return false;
- }
- while (arena->freelist.next[0] != nullptr) {
- AllocList *region = arena->freelist.next[0];
- size_t size = region->header.size;
- arena->freelist.next[0] = region->next[0];
- ABSL_RAW_CHECK(
- region->header.magic == Magic(kMagicUnallocated, &region->header),
- "bad magic number in DeleteArena()");
- ABSL_RAW_CHECK(region->header.arena == arena,
- "bad arena pointer in DeleteArena()");
- ABSL_RAW_CHECK(size % arena->pagesize == 0,
- "empty arena has non-page-aligned block size");
- ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
- "empty arena has non-page-aligned block");
- int munmap_result;
-#ifdef _WIN32
- munmap_result = VirtualFree(region, 0, MEM_RELEASE);
- ABSL_RAW_CHECK(munmap_result != 0,
- "LowLevelAlloc::DeleteArena: VitualFree failed");
-#else
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
- munmap_result = munmap(region, size);
- } else {
- munmap_result = base_internal::DirectMunmap(region, size);
- }
-#else
- munmap_result = munmap(region, size);
-#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if (munmap_result != 0) {
- ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
- errno);
- }
-#endif // _WIN32
- }
- section.Leave();
- arena->~Arena();
- Free(arena);
- return true;
-}
-
-// ---------------------------------------------------------------------------
-
-// Addition, checking for overflow. The intent is to die if an external client
-// manages to push through a request that would cause arithmetic to fail.
-static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
- uintptr_t sum = a + b;
- ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
- return sum;
-}
-
-// Return value rounded up to next multiple of align.
-// align must be a power of two.
-static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
- return CheckedAdd(addr, align - 1) & ~(align - 1);
-}
-
-// Equivalent to "return prev->next[i]" but with sanity checking
-// that the freelist is in the correct order, that it
-// consists of regions marked "unallocated", and that no two regions
-// are adjacent in memory (they should have been coalesced).
-// L >= arena->mu
-static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
- ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
- AllocList *next = prev->next[i];
- if (next != nullptr) {
- ABSL_RAW_CHECK(
- next->header.magic == Magic(kMagicUnallocated, &next->header),
- "bad magic number in Next()");
- ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
- if (prev != &arena->freelist) {
- ABSL_RAW_CHECK(prev < next, "unordered freelist");
- ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
- reinterpret_cast<char *>(next),
- "malformed freelist");
- }
- }
- return next;
-}
-
-// Coalesce list item "a" with its successor if they are adjacent.
-static void Coalesce(AllocList *a) {
- AllocList *n = a->next[0];
- if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
- reinterpret_cast<char *>(n)) {
- LowLevelAlloc::Arena *arena = a->header.arena;
- a->header.size += n->header.size;
- n->header.magic = 0;
- n->header.arena = nullptr;
- AllocList *prev[kMaxLevel];
- LLA_SkiplistDelete(&arena->freelist, n, prev);
- LLA_SkiplistDelete(&arena->freelist, a, prev);
- a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
- &arena->random);
- LLA_SkiplistInsert(&arena->freelist, a, prev);
- }
-}
-
-// Adds block at location "v" to the free list
-// L >= arena->mu
-static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
- ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
- "bad magic number in AddToFreelist()");
- ABSL_RAW_CHECK(f->header.arena == arena,
- "bad arena pointer in AddToFreelist()");
- f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
- &arena->random);
- AllocList *prev[kMaxLevel];
- LLA_SkiplistInsert(&arena->freelist, f, prev);
- f->header.magic = Magic(kMagicUnallocated, &f->header);
- Coalesce(f); // maybe coalesce with successor
- Coalesce(prev[0]); // maybe coalesce with predecessor
-}
-
-// Frees storage allocated by LowLevelAlloc::Alloc().
-// L < arena->mu
-void LowLevelAlloc::Free(void *v) {
- if (v != nullptr) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
- LowLevelAlloc::Arena *arena = f->header.arena;
- ArenaLock section(arena);
- AddToFreelist(v, arena);
- ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
- arena->allocation_count--;
- section.Leave();
- }
-}
-
-// allocates and returns a block of size bytes, to be freed with Free()
-// L < arena->mu
-static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
- void *result = nullptr;
- if (request != 0) {
- AllocList *s; // will point to region that satisfies request
- ArenaLock section(arena);
- // round up with header
- size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
- arena->round_up);
- for (;;) { // loop until we find a suitable region
- // find the minimum levels that a block of this size must have
- int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
- if (i < arena->freelist.levels) { // potential blocks exist
- AllocList *before = &arena->freelist; // predecessor of s
- while ((s = Next(i, before, arena)) != nullptr &&
- s->header.size < req_rnd) {
- before = s;
- }
- if (s != nullptr) { // we found a region
- break;
- }
- }
- // we unlock before mmap() both because mmap() may call a callback hook,
- // and because it may be slow.
- arena->mu.Unlock();
- // mmap generous 64K chunks to decrease
- // the chances/impact of fragmentation:
- size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
- void *new_pages;
-#ifdef _WIN32
- new_pages = VirtualAlloc(0, new_pages_size,
- MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
-#else
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
- new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
- PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- } else {
- new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- }
-#else
- new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- if (new_pages == MAP_FAILED) {
- ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
- }
-
-#endif // _WIN32
- arena->mu.Lock();
- s = reinterpret_cast<AllocList *>(new_pages);
- s->header.size = new_pages_size;
- // Pretend the block is allocated; call AddToFreelist() to free it.
- s->header.magic = Magic(kMagicAllocated, &s->header);
- s->header.arena = arena;
- AddToFreelist(&s->levels, arena); // insert new region into free list
- }
- AllocList *prev[kMaxLevel];
- LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
- // s points to the first free region that's big enough
- if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
- // big enough to split
- AllocList *n = reinterpret_cast<AllocList *>
- (req_rnd + reinterpret_cast<char *>(s));
- n->header.size = s->header.size - req_rnd;
- n->header.magic = Magic(kMagicAllocated, &n->header);
- n->header.arena = arena;
- s->header.size = req_rnd;
- AddToFreelist(&n->levels, arena);
- }
- s->header.magic = Magic(kMagicAllocated, &s->header);
- ABSL_RAW_CHECK(s->header.arena == arena, "");
- arena->allocation_count++;
- section.Leave();
- result = &s->levels;
- }
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+ new (&default_arena_storage)
+ LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+ new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ new (&unhooked_async_sig_safe_arena_storage)
+ LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks. Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe. Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(
+ &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+} // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+ base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+ return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
+ public:
+ explicit ArenaLock(LowLevelAlloc::Arena *arena)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+ : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ sigset_t all;
+ sigfillset(&all);
+ mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+ }
+#endif
+ arena_->mu.Lock();
+ }
+ ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+ void Leave() ABSL_UNLOCK_FUNCTION() {
+ arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (mask_valid_) {
+ const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+ }
+ }
+#endif
+ left_ = true;
+ }
+
+ private:
+ bool left_ = false; // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ bool mask_valid_ = false;
+ sigset_t mask_; // old mask of blocked signals
+#endif
+ LowLevelAlloc::Arena *arena_;
+ ArenaLock(const ArenaLock &) = delete;
+ ArenaLock &operator=(const ArenaLock &) = delete;
+};
+} // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+ return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#elif defined(__wasm__) || defined(__asmjs__)
+ return getpagesize();
+#else
+ return sysconf(_SC_PAGESIZE);
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+ // Round up block sizes to a power of two close to the header size.
+ size_t round_up = 16;
+ while (round_up < sizeof(AllocList::Header)) {
+ round_up += round_up;
+ }
+ return round_up;
+}
+
+} // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+ : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+ allocation_count(0),
+ flags(flags_value),
+ pagesize(GetPageSize()),
+ round_up(RoundedUpBlockSize()),
+ min_size(2 * round_up),
+ random(0) {
+ freelist.header.size = 0;
+ freelist.header.magic =
+ Magic(kMagicUnallocated, &freelist.header);
+ freelist.header.arena = this;
+ freelist.levels = 0;
+ memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+ Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ meta_data_arena = UnhookedAsyncSigSafeArena();
+ } else // NOLINT(readability/braces)
+#endif
+ if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+ meta_data_arena = UnhookedArena();
+ }
+ Arena *result =
+ new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+ return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+ ABSL_RAW_CHECK(
+ arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+ "may not delete default arena");
+ ArenaLock section(arena);
+ if (arena->allocation_count != 0) {
+ section.Leave();
+ return false;
+ }
+ while (arena->freelist.next[0] != nullptr) {
+ AllocList *region = arena->freelist.next[0];
+ size_t size = region->header.size;
+ arena->freelist.next[0] = region->next[0];
+ ABSL_RAW_CHECK(
+ region->header.magic == Magic(kMagicUnallocated, &region->header),
+ "bad magic number in DeleteArena()");
+ ABSL_RAW_CHECK(region->header.arena == arena,
+ "bad arena pointer in DeleteArena()");
+ ABSL_RAW_CHECK(size % arena->pagesize == 0,
+ "empty arena has non-page-aligned block size");
+ ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+ "empty arena has non-page-aligned block");
+ int munmap_result;
+#ifdef _WIN32
+ munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+ ABSL_RAW_CHECK(munmap_result != 0,
+ "LowLevelAlloc::DeleteArena: VitualFree failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+ munmap_result = munmap(region, size);
+ } else {
+ munmap_result = base_internal::DirectMunmap(region, size);
+ }
+#else
+ munmap_result = munmap(region, size);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (munmap_result != 0) {
+ ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+ errno);
+ }
+#endif // _WIN32
+ }
+ section.Leave();
+ arena->~Arena();
+ Free(arena);
+ return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow. The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+ uintptr_t sum = a + b;
+ ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+ return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+ return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L >= arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+ ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+ AllocList *next = prev->next[i];
+ if (next != nullptr) {
+ ABSL_RAW_CHECK(
+ next->header.magic == Magic(kMagicUnallocated, &next->header),
+ "bad magic number in Next()");
+ ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+ if (prev != &arena->freelist) {
+ ABSL_RAW_CHECK(prev < next, "unordered freelist");
+ ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+ reinterpret_cast<char *>(next),
+ "malformed freelist");
+ }
+ }
+ return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+ AllocList *n = a->next[0];
+ if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+ reinterpret_cast<char *>(n)) {
+ LowLevelAlloc::Arena *arena = a->header.arena;
+ a->header.size += n->header.size;
+ n->header.magic = 0;
+ n->header.arena = nullptr;
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistDelete(&arena->freelist, n, prev);
+ LLA_SkiplistDelete(&arena->freelist, a, prev);
+ a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+ &arena->random);
+ LLA_SkiplistInsert(&arena->freelist, a, prev);
+ }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+ AllocList *f = reinterpret_cast<AllocList *>(
+ reinterpret_cast<char *>(v) - sizeof (f->header));
+ ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+ "bad magic number in AddToFreelist()");
+ ABSL_RAW_CHECK(f->header.arena == arena,
+ "bad arena pointer in AddToFreelist()");
+ f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+ &arena->random);
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistInsert(&arena->freelist, f, prev);
+ f->header.magic = Magic(kMagicUnallocated, &f->header);
+ Coalesce(f); // maybe coalesce with successor
+ Coalesce(prev[0]); // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+ if (v != nullptr) {
+ AllocList *f = reinterpret_cast<AllocList *>(
+ reinterpret_cast<char *>(v) - sizeof (f->header));
+ LowLevelAlloc::Arena *arena = f->header.arena;
+ ArenaLock section(arena);
+ AddToFreelist(v, arena);
+ ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+ arena->allocation_count--;
+ section.Leave();
+ }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+ void *result = nullptr;
+ if (request != 0) {
+ AllocList *s; // will point to region that satisfies request
+ ArenaLock section(arena);
+ // round up with header
+ size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+ arena->round_up);
+ for (;;) { // loop until we find a suitable region
+ // find the minimum levels that a block of this size must have
+ int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+ if (i < arena->freelist.levels) { // potential blocks exist
+ AllocList *before = &arena->freelist; // predecessor of s
+ while ((s = Next(i, before, arena)) != nullptr &&
+ s->header.size < req_rnd) {
+ before = s;
+ }
+ if (s != nullptr) { // we found a region
+ break;
+ }
+ }
+ // we unlock before mmap() both because mmap() may call a callback hook,
+ // and because it may be slow.
+ arena->mu.Unlock();
+ // mmap generous 64K chunks to decrease
+ // the chances/impact of fragmentation:
+ size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+ void *new_pages;
+#ifdef _WIN32
+ new_pages = VirtualAlloc(0, new_pages_size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+ PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ } else {
+ new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ }
+#else
+ new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ if (new_pages == MAP_FAILED) {
+ ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+ }
+
+#endif // _WIN32
+ arena->mu.Lock();
+ s = reinterpret_cast<AllocList *>(new_pages);
+ s->header.size = new_pages_size;
+ // Pretend the block is allocated; call AddToFreelist() to free it.
+ s->header.magic = Magic(kMagicAllocated, &s->header);
+ s->header.arena = arena;
+ AddToFreelist(&s->levels, arena); // insert new region into free list
+ }
+ AllocList *prev[kMaxLevel];
+ LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
+ // s points to the first free region that's big enough
+ if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+ // big enough to split
+ AllocList *n = reinterpret_cast<AllocList *>
+ (req_rnd + reinterpret_cast<char *>(s));
+ n->header.size = s->header.size - req_rnd;
+ n->header.magic = Magic(kMagicAllocated, &n->header);
+ n->header.arena = arena;
+ s->header.size = req_rnd;
+ AddToFreelist(&n->levels, arena);
+ }
+ s->header.magic = Magic(kMagicAllocated, &s->header);
+ ABSL_RAW_CHECK(s->header.arena == arena, "");
+ arena->allocation_count++;
+ section.Leave();
+ result = &s->levels;
+ }
ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
- return result;
-}
-
-void *LowLevelAlloc::Alloc(size_t request) {
- void *result = DoAllocWithArena(request, DefaultArena());
- return result;
-}
-
-void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
- ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
- void *result = DoAllocWithArena(request, arena);
- return result;
-}
-
-} // namespace base_internal
+ return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+ void *result = DoAllocWithArena(request, DefaultArena());
+ return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+ ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+ void *result = DoAllocWithArena(request, arena);
+ return result;
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
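The arithmetic behind the freelist above can be checked in isolation. The sketch below mirrors the file-static IntLog2() and Magic() helpers from low_level_alloc.cc (they are not exported, so these are copies; the 32-byte header and 64-byte min_size figures are assumptions about a typical 64-bit build):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Copy of IntLog2(): integer approximation of log2(size/base).
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) result++;
  return result;
}

// Copy of Magic(): the stored magic word is the constant XORed with the
// header's own address, so a stale or misdirected pointer is unlikely to
// pass the ABSL_RAW_CHECKs in AddToFreelist() and Next().
static uintptr_t Magic(uintptr_t magic, const void* header) {
  return magic ^ reinterpret_cast<uintptr_t>(header);
}

int main() {
  // On a typical 64-bit build sizeof(AllocList::Header) == 32, so
  // round_up == 32 and min_size == 64. A 4096-byte block then gets base
  // level log2(4096/64) == 6, plus 1 when no RNG is supplied, matching
  // LLA_SkiplistLevels(4096, 64, nullptr) == 7.
  assert(IntLog2(4096, 64) + 1 == 7);

  // Round-tripping the XOR recovers the constant, which is exactly the
  // equality the allocator asserts before trusting a block header.
  const uintptr_t kMagicAllocated = 0x4c833e95U;
  unsigned char header[32];
  uintptr_t stored = Magic(kMagicAllocated, header);
  assert(Magic(stored, header) == kMagicAllocated);
  return 0;
}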
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.h b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.h
index aeab7107f4..db91951c82 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc.h
@@ -1,126 +1,126 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
-#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
-
-// A simple thread-safe memory allocator that does not depend on
-// mutexes or thread-specific data. It is intended to be used
-// sparingly, and only when malloc() would introduce an unwanted
-// dependency, such as inside the heap-checker, or the Mutex
-// implementation.
-
-// IWYU pragma: private, include "base/low_level_alloc.h"
-
-#include <sys/types.h>
-
-#include <cstdint>
-
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-
-// LowLevelAlloc requires that the platform support low-level
-// allocation of virtual memory. Platforms lacking this cannot use
-// LowLevelAlloc.
-#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
-#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
-#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
-#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
-#endif
-
-// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
-// asm.js / WebAssembly.
-// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
-// for more information.
-#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
-#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
-#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
-#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
-#endif
-
-#include <cstddef>
-
-#include "absl/base/port.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data. It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-class LowLevelAlloc {
- public:
- struct Arena; // an arena from which memory may be allocated
-
- // Returns a pointer to a block of at least "request" bytes
- // that has been newly allocated from the specified arena.
- // For Alloc(), DefaultArena() is used.
- // Returns 0 if passed request==0.
- // Does not return 0 under other circumstances; it crashes if memory
- // is not available.
- static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
- static void *AllocWithArena(size_t request, Arena *arena)
- ABSL_ATTRIBUTE_SECTION(malloc_hook);
-
- // Deallocates a region of memory that was previously allocated with
- // Alloc(). Does nothing if passed 0. "s" must be either 0,
- // or must have been returned from a call to Alloc() and not yet passed to
- // Free() since that call to Alloc(). The space is returned to the arena
- // from which it was allocated.
- static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
-
- // The ABSL_ATTRIBUTE_SECTION(malloc_hook) on Alloc* and Free
- // places all callers of MallocHook::Invoke* in this module
- // into a special section, so that
- // MallocHook::GetCallerStackTrace can function accurately.
-
- // Create a new arena.
- // The root metadata for the new arena is allocated in the
- // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
- // These values may be ORed into flags:
- enum {
- // Report calls to Alloc() and Free() via the MallocHook interface.
- // Set in the DefaultArena.
- kCallMallocHook = 0x0001,
-
-#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
- // Make calls to Alloc(), Free() be async-signal-safe. Not set in
- // DefaultArena(). Not supported on all platforms.
- kAsyncSignalSafe = 0x0002,
-#endif
- };
- // Construct a new arena. The allocation of the underlying metadata honors
- // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
- // is itself async-signal-safe, as well as generating an arena that provides
- // async-signal-safe Alloc/Free.
- static Arena *NewArena(int32_t flags);
-
- // Destroys an arena allocated by NewArena and returns true,
- // provided no allocated blocks remain in the arena.
- // If allocated blocks remain in the arena, does nothing and
- // returns false.
- // It is illegal to attempt to destroy the DefaultArena().
- static bool DeleteArena(Arena *arena);
-
- // The default arena that always exists.
- static Arena *DefaultArena();
-
- private:
- LowLevelAlloc(); // no instances
-};
-
-} // namespace base_internal
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+ struct Arena; // an arena from which memory may be allocated
+
+ // Returns a pointer to a block of at least "request" bytes
+ // that has been newly allocated from the specified arena.
+ // For Alloc(), DefaultArena() is used.
+ // Returns 0 if passed request==0.
+ // Does not return 0 under other circumstances; it crashes if memory
+ // is not available.
+ static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+ static void *AllocWithArena(size_t request, Arena *arena)
+ ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+ // Deallocates a region of memory that was previously allocated with
+ // Alloc(). Does nothing if passed 0. "s" must be either 0,
+ // or must have been returned from a call to Alloc() and not yet passed to
+ // Free() since that call to Alloc(). The space is returned to the arena
+ // from which it was allocated.
+ static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+ // The ABSL_ATTRIBUTE_SECTION(malloc_hook) on Alloc* and Free
+ // places all callers of MallocHook::Invoke* in this module
+ // into a special section, so that
+ // MallocHook::GetCallerStackTrace can function accurately.
+
+ // Create a new arena.
+ // The root metadata for the new arena is allocated in the
+ // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+ // These values may be ORed into flags:
+ enum {
+ // Report calls to Alloc() and Free() via the MallocHook interface.
+ // Set in the DefaultArena.
+ kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+ // DefaultArena(). Not supported on all platforms.
+ kAsyncSignalSafe = 0x0002,
+#endif
+ };
+ // Construct a new arena. The allocation of the underlying metadata honors
+ // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
+ // is itself async-signal-safe, as well as generating an arena that provides
+ // async-signal-safe Alloc/Free.
+ static Arena *NewArena(int32_t flags);
+
+ // Destroys an arena allocated by NewArena and returns true,
+ // provided no allocated blocks remain in the arena.
+ // If allocated blocks remain in the arena, does nothing and
+ // returns false.
+ // It is illegal to attempt to destroy the DefaultArena().
+ static bool DeleteArena(Arena *arena);
+
+ // The default arena that always exists.
+ static Arena *DefaultArena();
+
+ private:
+ LowLevelAlloc(); // no instances
+};
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
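A minimal usage sketch of the interface declared above, assuming a platform where ABSL_LOW_LEVEL_ALLOC_MISSING is not defined. Every call is from the header itself; flags == 0 deliberately leaves kCallMallocHook unset, so per NewArena() the arena metadata comes from the unhooked global arena:

#include <cstring>

#include "absl/base/internal/low_level_alloc.h"

#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
int main() {
  using absl::base_internal::LowLevelAlloc;

  // Allocate from the default arena; crashes rather than returning null
  // when request > 0 and memory is unavailable.
  void* p = LowLevelAlloc::Alloc(128);
  std::memset(p, 0, 128);
  LowLevelAlloc::Free(p);

  // A private arena; its pages return to the OS only when the arena is
  // deleted.
  LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(0);
  void* q = LowLevelAlloc::AllocWithArena(64, arena);
  LowLevelAlloc::Free(q);  // the block goes back to arena's freelist

  // Succeeds (returns true) only because no allocated blocks remain.
  return LowLevelAlloc::DeleteArena(arena) ? 0 : 1;
}
#else
int main() { return 0; }
#endif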
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc/ya.make b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc/ya.make
index 93607c3ef0..df53191043 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc/ya.make
@@ -1,36 +1,36 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
-
-SRCS(
- low_level_alloc.cc
-)
-
-END()
+SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
+
+SRCS(
+ low_level_alloc.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
index 8c81e96ffc..9baccc0659 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/low_level_scheduling.h
@@ -1,34 +1,34 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Core interfaces and definitions used by low-level interfaces such as
-// SpinLock.
-
-#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
-#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/scheduling_mode.h"
-#include "absl/base/macros.h"
-
-// The following two declarations exist so SchedulingGuard may friend them with
-// the appropriate language linkage. These callbacks allow libc internals, such
-// as function level statics, to schedule cooperatively when locking.
-extern "C" bool __google_disable_rescheduling(void);
-extern "C" void __google_enable_rescheduling(bool disable_result);
-
-namespace absl {
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage. These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
class CondVar;
class Mutex;
@@ -37,55 +37,55 @@ namespace synchronization_internal {
int MutexDelay(int32_t c, int mode);
} // namespace synchronization_internal
-namespace base_internal {
-
-class SchedulingHelper; // To allow use of SchedulingGuard.
-class SpinLock; // To allow use of SchedulingGuard.
-
-// SchedulingGuard
-// Provides guard semantics that may be used to disable cooperative rescheduling
-// of the calling thread within specific program blocks. This is used to
-// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
-// scheduling depends on.
-//
-// Domain implementations capable of rescheduling in reaction to involuntary
-// kernel thread actions (e.g. blocking due to a pagefault or syscall) must
-// guarantee that an annotated thread is not allowed to (cooperatively)
-// reschedule until the annotated region is complete.
-//
-// It is an error to attempt to use a cooperatively scheduled resource (e.g.
-// Mutex) within a rescheduling-disabled region.
-//
-// All methods are async-signal safe.
-class SchedulingGuard {
- public:
- // Returns true iff the calling thread may be cooperatively rescheduled.
- static bool ReschedulingIsAllowed();
+namespace base_internal {
+
+class SchedulingHelper; // To allow use of SchedulingGuard.
+class SpinLock; // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks. This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a pagefault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+ // Returns true iff the calling thread may be cooperatively rescheduled.
+ static bool ReschedulingIsAllowed();
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;
-
- private:
- // Disable cooperative rescheduling of the calling thread. It may still
- // initiate scheduling operations (e.g. wake-ups), however, it may not itself
-  // reschedule. Nestable. The returned result is opaque; clients should not
-  // attempt to interpret it.
-  // REQUIRES: Result must be passed to a pairing EnableRescheduling().
- static bool DisableRescheduling();
-
- // Marks the end of a rescheduling disabled region, previously started by
- // DisableRescheduling().
- // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
- static void EnableRescheduling(bool disable_result);
-
- // A scoped helper for {Disable, Enable}Rescheduling().
- // REQUIRES: destructor must run in same thread as constructor.
- struct ScopedDisable {
- ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
- ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
-
- bool disabled;
- };
-
+
+ private:
+ // Disable cooperative rescheduling of the calling thread. It may still
+ // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule. Nestable. The returned result is opaque; clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableRescheduling().
+ static bool DisableRescheduling();
+
+ // Marks the end of a rescheduling disabled region, previously started by
+ // DisableRescheduling().
+ // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+ static void EnableRescheduling(bool disable_result);
+
+ // A scoped helper for {Disable, Enable}Rescheduling().
+ // REQUIRES: destructor must run in same thread as constructor.
+ struct ScopedDisable {
+ ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+ ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+ bool disabled;
+ };
+
// A scoped helper to enable rescheduling temporarily.
// REQUIRES: destructor must run in same thread as constructor.
class ScopedEnable {
@@ -100,35 +100,35 @@ class SchedulingGuard {
// Access to SchedulingGuard is explicitly permitted.
friend class absl::CondVar;
friend class absl::Mutex;
- friend class SchedulingHelper;
- friend class SpinLock;
+ friend class SchedulingHelper;
+ friend class SpinLock;
friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
-};
-
-//------------------------------------------------------------------------------
-// End of public interfaces.
-//------------------------------------------------------------------------------
-
-inline bool SchedulingGuard::ReschedulingIsAllowed() {
- return false;
-}
-
-inline bool SchedulingGuard::DisableRescheduling() {
- return false;
-}
-
-inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
- return;
-}
-
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+ return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+ return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+ return;
+}
+
inline SchedulingGuard::ScopedEnable::ScopedEnable()
: scheduling_disabled_depth_(0) {}
inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
}
-} // namespace base_internal
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
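The Disable/Enable methods above are private, so only the declared friends (SpinLock, Mutex, CondVar, and the others listed) can reach them; a sketch of how such a friend would bracket a critical region with the scoped helper:

    // Hypothetical body inside a SchedulingGuard friend:
    void GuardedCriticalRegion() {
      absl::base_internal::SchedulingGuard::ScopedDisable disable;
      // ... touch state that must not observe a cooperative reschedule ...
    }  // ~ScopedDisable() passes the saved opaque result back to
       // EnableRescheduling(), restoring the previous state.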
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/per_thread_tls.h b/contrib/restricted/abseil-cpp/absl/base/internal/per_thread_tls.h
index 131b5f354c..cf5e97a047 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/per_thread_tls.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/per_thread_tls.h
@@ -1,52 +1,52 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
-#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
-
-// This header defines two macros:
-//
-// If the platform supports thread-local storage:
-//
-// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
-// thread-local variable
-// * ABSL_PER_THREAD_TLS is 1
-//
-// Otherwise:
-//
-// * ABSL_PER_THREAD_TLS_KEYWORD is empty
-// * ABSL_PER_THREAD_TLS is 0
-//
-// Microsoft C supports thread-local storage.
-// GCC supports it if the appropriate version of glibc is available,
-// which the programmer can indicate by defining ABSL_HAVE_TLS.
-
-#include "absl/base/port.h" // For ABSL_HAVE_TLS
-
-#if defined(ABSL_PER_THREAD_TLS)
-#error ABSL_PER_THREAD_TLS cannot be directly set
-#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
-#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
-#elif defined(ABSL_HAVE_TLS)
-#define ABSL_PER_THREAD_TLS_KEYWORD __thread
-#define ABSL_PER_THREAD_TLS 1
-#elif defined(_MSC_VER)
-#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
-#define ABSL_PER_THREAD_TLS 1
-#else
-#define ABSL_PER_THREAD_TLS_KEYWORD
-#define ABSL_PER_THREAD_TLS 0
-#endif
-
-#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+// thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS.
+
+#include "absl/base/port.h" // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
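A short sketch of how the two macros above are meant to be consumed (the counter variable is a hypothetical example):

    #include "absl/base/internal/per_thread_tls.h"

    #if ABSL_PER_THREAD_TLS
    // One instance per thread; no synchronization needed for this counter.
    ABSL_PER_THREAD_TLS_KEYWORD int counter = 0;
    #else
    // The keyword expands to nothing: one shared instance, so callers would
    // need external synchronization.
    int counter = 0;
    #endif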
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/pretty_function.h b/contrib/restricted/abseil-cpp/absl/base/internal/pretty_function.h
index bcf6bf56de..35d51676dc 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/pretty_function.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/pretty_function.h
@@ -1,33 +1,33 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
-#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
-
-// ABSL_PRETTY_FUNCTION
-//
-// In C++11, __func__ gives the undecorated name of the current function. That
-// is, "main", not "int main()". Various compilers give extra macros to get the
-// decorated function name, including return type and arguments, to
-// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
-// version of these macros which forwards to the correct macro on each compiler.
-#if defined(_MSC_VER)
-#define ABSL_PRETTY_FUNCTION __FUNCSIG__
-#elif defined(__GNUC__)
-#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
-#else
-#error "Unsupported compiler"
-#endif
-
-#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
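Illustrative use of ABSL_PRETTY_FUNCTION (the exact output string is compiler-specific):

    #include <cstdio>

    #include "absl/base/internal/pretty_function.h"

    int Add(int a, int b) {
      // On GCC/Clang this prints a decorated signature such as
      // "int Add(int, int)"; MSVC's __FUNCSIG__ formats it differently.
      std::printf("%s\n", ABSL_PRETTY_FUNCTION);
      return a + b;
    }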
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
index 34d317c217..074e026adb 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.cc
@@ -1,188 +1,188 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/raw_logging.h"
-
-#include <stddef.h>
-#include <cstdarg>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-#include "absl/base/internal/atomic_hook.h"
-#include "absl/base/log_severity.h"
-
-// We know how to perform low-level writes to stderr in POSIX and Windows. For
-// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
-// Much of raw_logging.cc becomes a no-op when we can't output messages,
-// although a FATAL ABSL_RAW_LOG message will still abort the process.
-
-// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
-// (as from unistd.h)
-//
-// This preprocessor token is also defined in raw_io.cc. If you need to copy
-// this, consider moving both to config.h instead.
-#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
- defined(__Fuchsia__) || defined(__native_client__) || \
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <stddef.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows. For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc. If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__Fuchsia__) || defined(__native_client__) || \
defined(__EMSCRIPTEN__) || defined(__ASYLO__)
-#include <unistd.h>
-
-#define ABSL_HAVE_POSIX_WRITE 1
-#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
-#else
-#undef ABSL_HAVE_POSIX_WRITE
-#endif
-
-// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
-// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
-// for low level operations that want to avoid libc.
-#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
-#include <sys/syscall.h>
-#define ABSL_HAVE_SYSCALL_WRITE 1
-#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
-#else
-#undef ABSL_HAVE_SYSCALL_WRITE
-#endif
-
-#ifdef _WIN32
-#include <io.h>
-
-#define ABSL_HAVE_RAW_IO 1
-#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
-#else
-#undef ABSL_HAVE_RAW_IO
-#endif
-
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace raw_logging_internal {
namespace {
-// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
// a selected set of platforms for which we expect not to be able to raw log.
-
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<LogPrefixHook>
log_prefix_hook;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<AbortHook>
abort_hook;
-
-#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
constexpr char kTruncated[] = " ... (message truncated)\n";
-
-// vsnprintf the format to the buffer, adjusting *buf and *size to reflect
-// the consumed bytes, and return whether the message fit without truncation.
-// If truncation occurred, leave room in the buffer for the kTruncated[]
-// message if possible.
+
+// vsnprintf the format to the buffer, adjusting *buf and *size to reflect
+// the consumed bytes, and return whether the message fit without truncation.
+// If truncation occurred, leave room in the buffer for the kTruncated[]
+// message if possible.
bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
ABSL_PRINTF_ATTRIBUTE(3, 0);
bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
- int n = vsnprintf(*buf, *size, format, ap);
- bool result = true;
- if (n < 0 || n > *size) {
- result = false;
- if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
- n = *size - sizeof(kTruncated); // room for truncation message
- } else {
+ int n = vsnprintf(*buf, *size, format, ap);
+ bool result = true;
+ if (n < 0 || n > *size) {
+ result = false;
+ if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+ n = *size - sizeof(kTruncated); // room for truncation message
+ } else {
n = 0; // no room for truncation message
- }
- }
- *size -= n;
- *buf += n;
- return result;
-}
-#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
-
+ }
+ }
+ *size -= n;
+ *buf += n;
+ return result;
+}
+#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
constexpr int kLogBufSize = 3000;
-
-// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
-// that invoke malloc() and getenv() that might acquire some locks.
-
-// Helper for RawLog below.
-// *DoRawLog writes to *buf of *size and moves them past the written portion.
-// It returns true iff there was no overflow or error.
-bool DoRawLog(char** buf, int* size, const char* format, ...)
- ABSL_PRINTF_ATTRIBUTE(3, 4);
-bool DoRawLog(char** buf, int* size, const char* format, ...) {
- va_list ap;
- va_start(ap, format);
- int n = vsnprintf(*buf, *size, format, ap);
- va_end(ap);
- if (n < 0 || n > *size) return false;
- *size -= n;
- *buf += n;
- return true;
-}
-
-void RawLogVA(absl::LogSeverity severity, const char* file, int line,
- const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
-void RawLogVA(absl::LogSeverity severity, const char* file, int line,
- const char* format, va_list ap) {
- char buffer[kLogBufSize];
- char* buf = buffer;
- int size = sizeof(buffer);
-#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
- bool enabled = true;
-#else
- bool enabled = false;
-#endif
-
-#ifdef ABSL_MIN_LOG_LEVEL
- if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
- severity < absl::LogSeverity::kFatal) {
- enabled = false;
- }
-#endif
-
- auto log_prefix_hook_ptr = log_prefix_hook.Load();
- if (log_prefix_hook_ptr) {
- enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
- } else {
- if (enabled) {
- DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
- }
- }
- const char* const prefix_end = buf;
-
-#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
- if (enabled) {
- bool no_chop = VADoRawLog(&buf, &size, format, ap);
- if (no_chop) {
- DoRawLog(&buf, &size, "\n");
- } else {
- DoRawLog(&buf, &size, "%s", kTruncated);
- }
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+ ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ int n = vsnprintf(*buf, *size, format, ap);
+ va_end(ap);
+ if (n < 0 || n > *size) return false;
+ *size -= n;
+ *buf += n;
+ return true;
+}
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+ const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+ const char* format, va_list ap) {
+ char buffer[kLogBufSize];
+ char* buf = buffer;
+ int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ bool enabled = true;
+#else
+ bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+ if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+ severity < absl::LogSeverity::kFatal) {
+ enabled = false;
+ }
+#endif
+
+ auto log_prefix_hook_ptr = log_prefix_hook.Load();
+ if (log_prefix_hook_ptr) {
+ enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+ } else {
+ if (enabled) {
+ DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+ }
+ }
+ const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ if (enabled) {
+ bool no_chop = VADoRawLog(&buf, &size, format, ap);
+ if (no_chop) {
+ DoRawLog(&buf, &size, "\n");
+ } else {
+ DoRawLog(&buf, &size, "%s", kTruncated);
+ }
SafeWriteToStderr(buffer, strlen(buffer));
- }
-#else
- static_cast<void>(format);
- static_cast<void>(ap);
-#endif
-
- // Abort the process after logging a FATAL message, even if the output itself
- // was suppressed.
- if (severity == absl::LogSeverity::kFatal) {
- abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
- abort();
- }
-}
-
+ }
+#else
+ static_cast<void>(format);
+ static_cast<void>(ap);
+#endif
+
+ // Abort the process after logging a FATAL message, even if the output itself
+ // was suppressed.
+ if (severity == absl::LogSeverity::kFatal) {
+ abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+ abort();
+ }
+}
+
// Non-formatting version of RawLog().
//
// TODO(gfalcon): When string_view no longer depends on base, change this
@@ -193,50 +193,50 @@ void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
message.data());
}
-} // namespace
-
-void SafeWriteToStderr(const char *s, size_t len) {
-#if defined(ABSL_HAVE_SYSCALL_WRITE)
- syscall(SYS_write, STDERR_FILENO, s, len);
-#elif defined(ABSL_HAVE_POSIX_WRITE)
- write(STDERR_FILENO, s, len);
-#elif defined(ABSL_HAVE_RAW_IO)
- _write(/* stderr */ 2, s, len);
-#else
- // stderr logging unsupported on this platform
- (void) s;
- (void) len;
-#endif
-}
-
-void RawLog(absl::LogSeverity severity, const char* file, int line,
- const char* format, ...) {
- va_list ap;
- va_start(ap, format);
- RawLogVA(severity, file, line, format, ap);
- va_end(ap);
-}
-
-bool RawLoggingFullySupported() {
-#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
- return true;
-#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
- return false;
-#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
-}
-
+} // namespace
+
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+ syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+ write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+ _write(/* stderr */ 2, s, len);
+#else
+ // stderr logging unsupported on this platform
+ (void) s;
+ (void) len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+ const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ RawLogVA(severity, file, line, format, ap);
+ va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ return true;
+#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+ return false;
+#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
-
+
void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
-void RegisterInternalLogFunction(InternalLogFunction func) {
- internal_log_function.Store(func);
-}
-
-} // namespace raw_logging_internal
+void RegisterInternalLogFunction(InternalLogFunction func) {
+ internal_log_function.Store(func);
+}
+
+} // namespace raw_logging_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
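DoRawLog and VADoRawLog above share one idiom worth noting: vsnprintf returns the byte count the full message would need, so comparing that against the remaining capacity detects overflow before the cursor is advanced. A standalone sketch of the idiom (AppendF is a hypothetical name):

    #include <cstdarg>
    #include <cstdio>

    bool AppendF(char** buf, int* size, const char* format, ...) {
      va_list ap;
      va_start(ap, format);
      int n = vsnprintf(*buf, *size, format, ap);
      va_end(ap);
      if (n < 0 || n > *size) return false;  // error, or message did not fit
      *buf += n;   // advance past the written portion
      *size -= n;  // shrink the remaining capacity
      return true;
    }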
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
index d0e5d75815..2bf7aabac1 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging.h
@@ -1,77 +1,77 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Thread-safe logging routines that do not allocate any memory or
-// acquire any locks, and can therefore be used by low-level memory
-// allocation, synchronization, and signal-handling code.
-
-#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
-#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
-
-#include <string>
-
-#include "absl/base/attributes.h"
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/atomic_hook.h"
-#include "absl/base/log_severity.h"
-#include "absl/base/macros.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
-#include "absl/base/port.h"
-
-// This is similar to LOG(severity) << format..., but
-// * it is to be used ONLY by low-level modules that can't use normal LOG()
-// * it is designed to be a low-level logger that does not allocate any
-// memory and does not need any locks, hence:
-// * it logs straight and ONLY to STDERR w/o buffering
-// * it uses an explicit printf-format and arguments list
-// * it will silently chop off really long message strings
-// Usage example:
-// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
-// This will print an almost standard log line like this to stderr only:
-// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
-
-#define ABSL_RAW_LOG(severity, ...) \
- do { \
- constexpr const char* absl_raw_logging_internal_basename = \
- ::absl::raw_logging_internal::Basename(__FILE__, \
- sizeof(__FILE__) - 1); \
- ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
- absl_raw_logging_internal_basename, \
- __LINE__, __VA_ARGS__); \
- } while (0)
-
-// Similar to CHECK(condition) << message, but for low-level modules:
-// we use only ABSL_RAW_LOG that does not allocate memory.
-// We do not want to provide args list here to encourage this usage:
-// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
-// so that the args are not computed when not needed.
-#define ABSL_RAW_CHECK(condition, message) \
- do { \
- if (ABSL_PREDICT_FALSE(!(condition))) { \
- ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
- } \
- } while (0)
-
-// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
-// except that if the richer log library is linked into the binary, we dispatch
-// to that instead. This is potentially useful for internal logging and
-// assertions, where we are using RAW_LOG neither for its async-signal-safety
-// nor for its non-allocating nature, but rather because raw logging has very
-// few other dependencies.
-//
-// The API is a subset of the above: each macro only takes two arguments. Use
-// StrCat if you need to build a richer message.
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+// memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_basename = \
+ ::absl::raw_logging_internal::Basename(__FILE__, \
+ sizeof(__FILE__) - 1); \
+ ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_basename, \
+ __LINE__, __VA_ARGS__); \
+ } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(condition))) { \
+ ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+ } \
+ } while (0)
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead. This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments. Use
+// StrCat if you need to build a richer message.
#define ABSL_INTERNAL_LOG(severity, message) \
do { \
constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
@@ -80,104 +80,104 @@
absl_raw_logging_internal_filename, __LINE__, message); \
if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
ABSL_INTERNAL_UNREACHABLE; \
- } while (0)
-
-#define ABSL_INTERNAL_CHECK(condition, message) \
- do { \
- if (ABSL_PREDICT_FALSE(!(condition))) { \
- std::string death_message = "Check " #condition " failed: "; \
- death_message += std::string(message); \
- ABSL_INTERNAL_LOG(FATAL, death_message); \
- } \
- } while (0)
-
-#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
-#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
-#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
-#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
-#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
- ::absl::NormalizeLogSeverity(severity)
-
-namespace absl {
+ } while (0)
+
+#define ABSL_INTERNAL_CHECK(condition, message) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(condition))) { \
+ std::string death_message = "Check " #condition " failed: "; \
+ death_message += std::string(message); \
+ ABSL_INTERNAL_LOG(FATAL, death_message); \
+ } \
+ } while (0)
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+ ::absl::NormalizeLogSeverity(severity)
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
-
-// Helper function to implement ABSL_RAW_LOG
-// Logs format... at "severity" level, reporting it
-// as called from file:line.
-// This does not allocate memory or acquire locks.
-void RawLog(absl::LogSeverity severity, const char* file, int line,
- const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
-
-// Writes the provided buffer directly to stderr, in a safe, low-level manner.
-//
-// In POSIX this means calling write(), which is async-signal safe and does
-// not malloc. If the platform supports the SYS_write syscall, we invoke that
-// directly to side-step any libc interception.
-void SafeWriteToStderr(const char *s, size_t len);
-
-// Compile-time function to get the "base" filename, that is, the part of
-// a filename after the last "/" or "\" path separator. The search starts at
-// the end of the string; the second parameter is the length of the string.
-constexpr const char* Basename(const char* fname, int offset) {
- return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
- ? fname + offset
- : Basename(fname, offset - 1);
-}
-
-// For testing only.
-// Returns true if raw logging is fully supported. When it is not
-// fully supported, no messages will be emitted, but a log at FATAL
-// severity will cause an abort.
-//
-// TODO(gfalcon): Come up with a better name for this method.
-bool RawLoggingFullySupported();
-
-// Function type for a raw_logging customization hook for suppressing messages
-// by severity, and for writing custom prefixes on non-suppressed messages.
-//
-// The installed hook is called for every raw log invocation. The message will
-// be logged to stderr only if the hook returns true. FATAL errors will cause
-// the process to abort, even if writing to stderr is suppressed. The hook is
-// also provided with an output buffer, where it can write a custom log message
-// prefix.
-//
-// The raw_logging system does not allocate memory or grab locks. User-provided
-// hooks must avoid these operations, and must not throw exceptions.
-//
-// 'severity' is the severity level of the message being written.
-// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
-// was located.
-// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
-// hook writes a prefix, it must increment *buffer and decrement *buf_size
-// accordingly.
-using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
- int line, char** buffer, int* buf_size);
-
-// Function type for a raw_logging customization hook called to abort a process
-// when a FATAL message is logged. If the provided AbortHook() returns, the
-// logging system will call abort().
-//
-// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
-// was located.
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+ const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc. If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len);
+
+// Compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator. The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+ return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+ ? fname + offset
+ : Basename(fname, offset - 1);
+}
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation. The message will
+// be logged to stderr only if the hook returns true. FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed. The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks. User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
+ int line, char** buffer, int* buf_size);
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged. If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
// The NUL-terminated logged message lives in the buffer between 'buf_start'
-// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
-// buffer (as written by the LogPrefixHook.)
-using AbortHook = void (*)(const char* file, int line, const char* buf_start,
- const char* prefix_end, const char* buf_end);
-
-// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-using InternalLogFunction = void (*)(absl::LogSeverity severity,
- const char* file, int line,
- const std::string& message);
-
+// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook.)
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+ const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(absl::LogSeverity severity,
+ const char* file, int line,
+ const std::string& message);
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
-
+
// Registers hooks of the above types. Only a single hook of each type may be
// registered. It is an error to call these functions multiple times with
// different input arguments.
@@ -186,10 +186,10 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
// not block or malloc, and are async-signal safe.
void RegisterLogPrefixHook(LogPrefixHook func);
void RegisterAbortHook(AbortHook func);
-void RegisterInternalLogFunction(InternalLogFunction func);
-
-} // namespace raw_logging_internal
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+} // namespace raw_logging_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
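Usage sketch for the two main macros declared above, following the header's own example (the function and values are illustrative):

    #include "absl/base/internal/raw_logging.h"

    void CheckFd(int fd, const char* path) {
      if (fd < 0) {
        ABSL_RAW_LOG(ERROR, "Failed to open %s: fd=%d", path, fd);
      }
      // Logs FATAL and aborts the process if the condition is false.
      ABSL_RAW_CHECK(path != nullptr, "path must be non-null");
    }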
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging/ya.make b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging/ya.make
index 8e1f688429..7d51c953ee 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/raw_logging/ya.make
@@ -1,33 +1,33 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
-
-SRCS(
- raw_logging.cc
-)
-
-END()
+SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
+
+SRCS(
+ raw_logging.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/scheduling_mode.h b/contrib/restricted/abseil-cpp/absl/base/internal/scheduling_mode.h
index 38c2dd92ce..8be5ab6dd3 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/scheduling_mode.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/scheduling_mode.h
@@ -1,58 +1,58 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Core interfaces and definitions used by low-level interfaces such as
-// SpinLock.
-
-#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
-#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// Used to describe how a thread may be scheduled. Typically associated with
-// the declaration of a resource supporting synchronized access.
-//
-// SCHEDULE_COOPERATIVE_AND_KERNEL:
-// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
-// reschedule (using base::scheduling semantics); allowing other cooperative
-// threads to proceed.
-//
-// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
-// Specifies that no cooperative scheduling semantics may be used, even if the
-// current thread is itself cooperatively scheduled. This means that
-// cooperative threads will NOT allow other cooperative threads to execute in
-// their place while waiting for a resource of this type. Host operating system
-// semantics (e.g. a futex) may still be used.
-//
-// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
-// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
-// base::scheduling (e.g. the implementation of a Scheduler) may depend.
-//
-// NOTE: Cooperative resources may not be nested below non-cooperative ones.
-// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
-// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
-enum SchedulingMode {
- SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
- SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
-};
-
-} // namespace base_internal
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled. Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled. This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type. Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+ SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
+ SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
+};
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
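The mode is typically supplied when constructing a low-level primitive; a sketch assuming the SchedulingMode constructor of SpinLock from the companion spinlock.h in this change:

    #include "absl/base/internal/spinlock.h"

    absl::base_internal::SpinLock* MakeInitLock() {
      // Non-cooperative: waiters spin/block in the kernel and do not yield
      // to other cooperatively scheduled threads.
      return new absl::base_internal::SpinLock(
          absl::base_internal::SCHEDULE_KERNEL_ONLY);
    }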
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.cc b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.cc
index 36e4c83fa9..8a934cb511 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.cc
@@ -1,81 +1,81 @@
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/scoped_set_env.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#endif
-
-#include <cstdlib>
-
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-namespace {
-
-#ifdef _WIN32
-const int kMaxEnvVarValueSize = 1024;
-#endif
-
-void SetEnvVar(const char* name, const char* value) {
-#ifdef _WIN32
- SetEnvironmentVariableA(name, value);
-#else
- if (value == nullptr) {
- ::unsetenv(name);
- } else {
- ::setenv(name, value, 1);
- }
-#endif
-}
-
-} // namespace
-
-ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
- : var_name_(var_name), was_unset_(false) {
-#ifdef _WIN32
- char buf[kMaxEnvVarValueSize];
- auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
- ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
-
- if (get_res == 0) {
- was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
- } else {
- old_value_.assign(buf, get_res);
- }
-
- SetEnvironmentVariableA(var_name_.c_str(), new_value);
-#else
- const char* val = ::getenv(var_name_.c_str());
- if (val == nullptr) {
- was_unset_ = true;
- } else {
- old_value_ = val;
- }
-#endif
-
- SetEnvVar(var_name_.c_str(), new_value);
-}
-
-ScopedSetEnv::~ScopedSetEnv() {
- SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
-}
-
-} // namespace base_internal
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+ SetEnvironmentVariableA(name, value);
+#else
+ if (value == nullptr) {
+ ::unsetenv(name);
+ } else {
+ ::setenv(name, value, 1);
+ }
+#endif
+}
+
+} // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+ : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+ char buf[kMaxEnvVarValueSize];
+ auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+ ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+ if (get_res == 0) {
+ was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+ } else {
+ old_value_.assign(buf, get_res);
+ }
+
+#else
+ const char* val = ::getenv(var_name_.c_str());
+ if (val == nullptr) {
+ was_unset_ = true;
+ } else {
+ old_value_ = val;
+ }
+#endif
+
+ SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+ SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.h b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.h
index dafcabcb37..19ec7b5d8a 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env.h
@@ -1,45 +1,45 @@
-//
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
-#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
-
-#include <string>
-
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-class ScopedSetEnv {
- public:
- ScopedSetEnv(const char* var_name, const char* new_value);
- ~ScopedSetEnv();
-
- private:
- std::string var_name_;
- std::string old_value_;
-
- // True if the environment variable was initially not set.
- bool was_unset_;
-};
-
-} // namespace base_internal
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+ ScopedSetEnv(const char* var_name, const char* new_value);
+ ~ScopedSetEnv();
+
+ private:
+ std::string var_name_;
+ std::string old_value_;
+
+ // True if the environment variable was initially not set.
+ bool was_unset_;
+};
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
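
A minimal usage sketch for the ScopedSetEnv class declared above, as it would
typically appear in a test (the variable name and value below are
illustrative, not taken from this commit):

    #include <cstdlib>

    #include "absl/base/internal/scoped_set_env.h"

    void RunWithFakeTmpdir() {
      // Overrides TMPDIR for the duration of this scope.
      absl::base_internal::ScopedSetEnv scoped_env("TMPDIR", "/tmp/fake-tmp");
      // Code under test sees the override via std::getenv("TMPDIR") here.
    }  // Destructor restores the old value, or unsets it if it was unset.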
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env/ya.make b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env/ya.make
index 1636b39e67..d858ec9351 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/scoped_set_env/ya.make
@@ -1,34 +1,34 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
-
-SRCS(
- scoped_set_env.cc
-)
-
-END()
+SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
+
+SRCS(
+ scoped_set_env.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
index a3b39f2c28..35c0696a34 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.cc
@@ -1,71 +1,71 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/spinlock.h"
-
-#include <algorithm>
-#include <atomic>
-#include <limits>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/atomic_hook.h"
-#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/spinlock_wait.h"
-#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
-#include "absl/base/call_once.h"
-
-// Description of lock-word:
-// 31..00: [............................3][2][1][0]
-//
-// [0]: kSpinLockHeld
-// [1]: kSpinLockCooperative
-// [2]: kSpinLockDisabledScheduling
-// [31..3]: ONLY kSpinLockSleeper OR
-// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
-//
-// Detailed descriptions:
-//
-// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
-//
-// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
-// contended iff kSpinLockCooperative is set.
-//
-// Bit [2]: This bit is mutually exclusive with bit [1]. It is used only by a
-// non-cooperative lock. When set, indicates that scheduling was
-// successfully disabled when the lock was acquired. May be unset,
-// even if non-cooperative, if a ThreadIdentity did not yet exist at
-// time of acquisition.
-//
-// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
-// acquired without contention; however, at least one waiter exists.
-//
-// Otherwise, bits [31..3] represent the time spent by the current lock
-// holder to acquire the lock. There may be outstanding waiter(s).
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "absl/base/call_once.h"
+
+// Description of lock-word:
+// 31..00: [............................3][2][1][0]
+//
+// [0]: kSpinLockHeld
+// [1]: kSpinLockCooperative
+// [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+// contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is mutually exclusive with bit [1]. It is used only by a
+// non-cooperative lock. When set, indicates that scheduling was
+// successfully disabled when the lock was acquired. May be unset,
+// even if non-cooperative, if a ThreadIdentity did not yet exist at
+// time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+// acquired without contention; however, at least one waiter exists.
+//
+// Otherwise, bits [31..3] represent the time spent by the current lock
+// holder to acquire the lock. There may be outstanding waiter(s).
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
+namespace base_internal {
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
const void *lock, int64_t wait_cycles)>
- submit_profile_data;
-
-void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
- int64_t wait_cycles)) {
- submit_profile_data.Store(fn);
-}
-
+ submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+ int64_t wait_cycles)) {
+ submit_profile_data.Store(fn);
+}
+
// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
@@ -73,38 +73,38 @@ constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;
-// Uncommon constructors.
-SpinLock::SpinLock(base_internal::SchedulingMode mode)
- : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
- ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
-}
-
-// Monitor the lock to see if its value changes within some time period
-// (adaptive_spin_count loop iterations). The last value read from the lock
-// is returned from the method.
-uint32_t SpinLock::SpinLoop() {
- // We are already in the slow path of SpinLock, initialize the
- // adaptive_spin_count here.
- ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
- ABSL_CONST_INIT static int adaptive_spin_count = 0;
- base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
- adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
- });
-
- int c = adaptive_spin_count;
- uint32_t lock_value;
- do {
- lock_value = lockword_.load(std::memory_order_relaxed);
- } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
- return lock_value;
-}
-
-void SpinLock::SlowLock() {
- uint32_t lock_value = SpinLoop();
- lock_value = TryLockInternal(lock_value, 0);
- if ((lock_value & kSpinLockHeld) == 0) {
- return;
- }
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+ : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations). The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+ // We are already in the slow path of SpinLock, initialize the
+ // adaptive_spin_count here.
+ ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
+ ABSL_CONST_INIT static int adaptive_spin_count = 0;
+ base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+ adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
+ });
+
+ int c = adaptive_spin_count;
+ uint32_t lock_value;
+ do {
+ lock_value = lockword_.load(std::memory_order_relaxed);
+ } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+ return lock_value;
+}
+
+void SpinLock::SlowLock() {
+ uint32_t lock_value = SpinLoop();
+ lock_value = TryLockInternal(lock_value, 0);
+ if ((lock_value & kSpinLockHeld) == 0) {
+ return;
+ }
base_internal::SchedulingMode scheduling_mode;
if ((lock_value & kSpinLockCooperative) != 0) {
@@ -113,34 +113,34 @@ void SpinLock::SlowLock() {
scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
}
- // The lock was not obtained initially, so this thread needs to wait for
- // it. Record the current timestamp in the local variable wait_start_time
- // so the total wait time can be stored in the lockword once this thread
- // obtains the lock.
- int64_t wait_start_time = CycleClock::Now();
- uint32_t wait_cycles = 0;
- int lock_wait_call_count = 0;
- while ((lock_value & kSpinLockHeld) != 0) {
- // If the lock is currently held, but not marked as having a sleeper, mark
- // it as having a sleeper.
- if ((lock_value & kWaitTimeMask) == 0) {
- // Here, just "mark" that the thread is going to sleep. Don't store the
+ // The lock was not obtained initially, so this thread needs to wait for
+ // it. Record the current timestamp in the local variable wait_start_time
+ // so the total wait time can be stored in the lockword once this thread
+ // obtains the lock.
+ int64_t wait_start_time = CycleClock::Now();
+ uint32_t wait_cycles = 0;
+ int lock_wait_call_count = 0;
+ while ((lock_value & kSpinLockHeld) != 0) {
+ // If the lock is currently held, but not marked as having a sleeper, mark
+ // it as having a sleeper.
+ if ((lock_value & kWaitTimeMask) == 0) {
+ // Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock -- the lock word stores the amount of time
// that the current holder waited before acquiring the lock, not the wait
// time of any thread currently waiting to acquire it.
- if (lockword_.compare_exchange_strong(
- lock_value, lock_value | kSpinLockSleeper,
- std::memory_order_relaxed, std::memory_order_relaxed)) {
- // Successfully transitioned to kSpinLockSleeper. Pass
- // kSpinLockSleeper to the SpinLockWait routine to properly indicate
- // the last lock_value observed.
- lock_value |= kSpinLockSleeper;
- } else if ((lock_value & kSpinLockHeld) == 0) {
- // Lock is free again, so try to acquire it before sleeping. The
- // new lock state will be the number of cycles this thread waited if
- // this thread obtains the lock.
- lock_value = TryLockInternal(lock_value, wait_cycles);
- continue; // Skip the delay at the end of the loop.
+ if (lockword_.compare_exchange_strong(
+ lock_value, lock_value | kSpinLockSleeper,
+ std::memory_order_relaxed, std::memory_order_relaxed)) {
+ // Successfully transitioned to kSpinLockSleeper. Pass
+ // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+ // the last lock_value observed.
+ lock_value |= kSpinLockSleeper;
+ } else if ((lock_value & kSpinLockHeld) == 0) {
+ // Lock is free again, so try to acquire it before sleeping. The
+ // new lock state will be the number of cycles this thread waited if
+ // this thread obtains the lock.
+ lock_value = TryLockInternal(lock_value, wait_cycles);
+ continue; // Skip the delay at the end of the loop.
} else if ((lock_value & kWaitTimeMask) == 0) {
// The lock is still held, without a waiter being marked, but something
// else about the lock word changed, causing our CAS to fail. For
@@ -149,81 +149,81 @@ void SpinLock::SlowLock() {
// set that flag. In this case, attempt again to mark ourselves as a
// waiter.
continue;
- }
- }
-
- // SpinLockDelay() calls into the fiber scheduler; we need to see
- // synchronization there to avoid false positives.
- ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- // Wait for an OS specific delay.
- base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
- scheduling_mode);
- ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
- // Spin again after returning from the wait routine to give this thread
- // some chance of obtaining the lock.
- lock_value = SpinLoop();
- wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
- lock_value = TryLockInternal(lock_value, wait_cycles);
- }
-}
-
-void SpinLock::SlowUnlock(uint32_t lock_value) {
- base_internal::SpinLockWake(&lockword_,
- false); // wake waiter if necessary
-
- // If our acquisition was contended, collect contentionz profile info. We
- // reserve a unitary wait time to represent that a waiter exists without our
- // own acquisition having been contended.
- if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
- const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
- ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- submit_profile_data(this, wait_cycles);
- ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
- }
-}
-
-// We use the upper 29 bits of the lock word to store the time spent waiting to
-// acquire this lock. This is reported by contentionz profiling. Since the
-// lower bits of the cycle counter wrap very quickly on high-frequency
+ }
+ }
+
+ // SpinLockDelay() calls into the fiber scheduler; we need to see
+ // synchronization there to avoid false positives.
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ // Wait for an OS specific delay.
+ base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+ scheduling_mode);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ // Spin again after returning from the wait routine to give this thread
+ // some chance of obtaining the lock.
+ lock_value = SpinLoop();
+ wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+ lock_value = TryLockInternal(lock_value, wait_cycles);
+ }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+ base_internal::SpinLockWake(&lockword_,
+ false); // wake waiter if necessary
+
+ // If our acquisition was contended, collect contentionz profile info. We
+ // reserve a unitary wait time to represent that a waiter exists without our
+ // own acquisition having been contended.
+ if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+ const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ submit_profile_data(this, wait_cycles);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock. This is reported by contentionz profiling. Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
// processors we divide to reduce the granularity to 2^kProfileTimestampShift
-// sized units. On a 4GHz machine this will lose track of wait times greater
-// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
+// sized units. On a 4GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
static constexpr int kProfileTimestampShift = 7;
-
+
// We currently reserve the lower 3 bits.
static constexpr int kLockwordReservedShift = 3;
-uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
- int64_t wait_end_time) {
- static const int64_t kMaxWaitTime =
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+ int64_t wait_end_time) {
+ static const int64_t kMaxWaitTime =
std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
- int64_t scaled_wait_time =
+ int64_t scaled_wait_time =
(wait_end_time - wait_start_time) >> kProfileTimestampShift;
-
- // Return a representation of the time spent waiting that can be stored in
- // the lock word's upper bits.
- uint32_t clamped = static_cast<uint32_t>(
+
+ // Return a representation of the time spent waiting that can be stored in
+ // the lock word's upper bits.
+ uint32_t clamped = static_cast<uint32_t>(
std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
-
- if (clamped == 0) {
- return kSpinLockSleeper; // Just wake waiters, but don't record contention.
- }
- // Bump up value if necessary to avoid returning kSpinLockSleeper.
- const uint32_t kMinWaitTime =
+
+ if (clamped == 0) {
+ return kSpinLockSleeper; // Just wake waiters, but don't record contention.
+ }
+ // Bump up value if necessary to avoid returning kSpinLockSleeper.
+ const uint32_t kMinWaitTime =
kSpinLockSleeper + (1 << kLockwordReservedShift);
- if (clamped == kSpinLockSleeper) {
- return kMinWaitTime;
- }
- return clamped;
-}
-
-uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
- // Cast to uint32_t first to ensure bits [63:32] are cleared.
- const uint64_t scaled_wait_time =
- static_cast<uint32_t>(lock_value & kWaitTimeMask);
+ if (clamped == kSpinLockSleeper) {
+ return kMinWaitTime;
+ }
+ return clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+ // Cast to uint32_t first to ensure bits [63:32] are cleared.
+ const uint64_t scaled_wait_time =
+ static_cast<uint32_t>(lock_value & kWaitTimeMask);
return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
-}
-
-} // namespace base_internal
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
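
As a sanity check on the encoding described above, a standalone sketch using
the same two shift constants (the kSpinLockSleeper special cases are omitted
for brevity; names are local to this example, not part of the Abseil API):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    constexpr int kProfileTimestampShift = 7;  // 128-cycle granularity
    constexpr int kLockwordReservedShift = 3;  // low bits hold the lock flags

    uint32_t Encode(int64_t start_cycles, int64_t end_cycles) {
      const int64_t kMax =
          std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
      int64_t scaled = (end_cycles - start_cycles) >> kProfileTimestampShift;
      return static_cast<uint32_t>(std::min(scaled, kMax))
             << kLockwordReservedShift;
    }

    uint64_t Decode(uint32_t lock_value) {
      // Strip the three flag bits, then undo the scaling; the low 7 bits of
      // the original cycle count are lost to granularity.
      return static_cast<uint64_t>(lock_value >> kLockwordReservedShift)
             << kProfileTimestampShift;
    }

Decode(Encode(0, 4000100)) yields 4000000: a roughly 1 ms wait at 4 GHz
survives the round trip to within the 128-cycle granularity, and the largest
representable wait is (2^29 - 1) * 128 cycles, about 17.2 s at 4 GHz, matching
the comment in the source.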
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
index cafaaa786b..ac40daff12 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock.h
@@ -1,64 +1,64 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Most users requiring mutual exclusion should use Mutex.
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in two situations:
// - for use by Abseil internal code that Mutex itself depends on
-// - for async signal safety (see below)
-
-// SpinLock is async signal safe. If a spinlock is used within a signal
-// handler, all code that acquires the lock must ensure that the signal cannot
-// arrive while the lock is held. Typically, this is done by blocking
-// the signal.
+// - for async signal safety (see below)
+
+// SpinLock is async signal safe. If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while the lock is held. Typically, this is done by blocking
+// the signal.
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.
-
-#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
-#define ABSL_BASE_INTERNAL_SPINLOCK_H_
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
-#include "absl/base/dynamic_annotations.h"
-#include "absl/base/internal/low_level_scheduling.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/scheduling_mode.h"
-#include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/macros.h"
-#include "absl/base/port.h"
-#include "absl/base/thread_annotations.h"
-
-namespace absl {
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-class ABSL_LOCKABLE SpinLock {
- public:
- SpinLock() : lockword_(kSpinLockCooperative) {
- ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
- }
-
- // Constructors that allow non-cooperative spinlocks to be created for use
- // inside thread schedulers. Normal clients should not use these.
- explicit SpinLock(base_internal::SchedulingMode mode);
-
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+ SpinLock() : lockword_(kSpinLockCooperative) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+ }
+
+ // Constructors that allow non-cooperative spinlocks to be created for use
+ // inside thread schedulers. Normal clients should not use these.
+ explicit SpinLock(base_internal::SchedulingMode mode);
+
// Constructor for global SpinLock instances. See absl/base/const_init.h.
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
@@ -67,81 +67,81 @@ class ABSL_LOCKABLE SpinLock {
// Default but non-trivial destructor in some build configurations causes an
// extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
- ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+ ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
~SpinLock() = default;
#endif
-
- // Acquire this SpinLock.
- inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
- if (!TryLockImpl()) {
- SlowLock();
- }
- ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
- }
-
- // Try to acquire this SpinLock without blocking and return true if the
- // acquisition was successful. If the lock was not acquired, false is
- // returned. If this SpinLock is free at the time of the call, TryLock
- // will return true with high probability.
- inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
- bool res = TryLockImpl();
- ABSL_TSAN_MUTEX_POST_LOCK(
- this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
- 0);
- return res;
- }
-
- // Release this SpinLock, which must be held by the calling thread.
- inline void Unlock() ABSL_UNLOCK_FUNCTION() {
- ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
- uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
- lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
- std::memory_order_release);
-
- if ((lock_value & kSpinLockDisabledScheduling) != 0) {
- base_internal::SchedulingGuard::EnableRescheduling(true);
- }
- if ((lock_value & kWaitTimeMask) != 0) {
- // Collect contentionz profile info, and speed the wakeup of any waiter.
- // The wait_cycles value indicates how long this thread spent waiting
- // for the lock.
- SlowUnlock(lock_value);
- }
- ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
- }
-
- // Determine if the lock is held. When the lock is held by the invoking
- // thread, true will always be returned. Intended to be used as
- // CHECK(lock.IsHeld()).
- inline bool IsHeld() const {
- return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
- }
-
- protected:
- // These should not be exported except for testing.
-
- // Store number of cycles between wait_start_time and wait_end_time in a
- // lock value.
- static uint32_t EncodeWaitCycles(int64_t wait_start_time,
- int64_t wait_end_time);
-
- // Extract number of wait cycles in a lock value.
- static uint64_t DecodeWaitCycles(uint32_t lock_value);
-
- // Provide access to the protected methods above. Use for testing only.
- friend struct SpinLockTest;
-
- private:
- // lockword_ is used to store the following:
- //
- // bit[0] encodes whether a lock is being held.
- // bit[1] encodes whether a lock uses cooperative scheduling.
+
+ // Acquire this SpinLock.
+ inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ if (!TryLockImpl()) {
+ SlowLock();
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ }
+
+ // Try to acquire this SpinLock without blocking and return true if the
+ // acquisition was successful. If the lock was not acquired, false is
+ // returned. If this SpinLock is free at the time of the call, TryLock
+ // will return true with high probability.
+ inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+ bool res = TryLockImpl();
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+ 0);
+ return res;
+ }
+
+ // Release this SpinLock, which must be held by the calling thread.
+ inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+ uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+ lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+ std::memory_order_release);
+
+ if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+ base_internal::SchedulingGuard::EnableRescheduling(true);
+ }
+ if ((lock_value & kWaitTimeMask) != 0) {
+ // Collect contentionz profile info, and speed the wakeup of any waiter.
+ // The wait_cycles value indicates how long this thread spent waiting
+ // for the lock.
+ SlowUnlock(lock_value);
+ }
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+ }
+
+ // Determine if the lock is held. When the lock is held by the invoking
+ // thread, true will always be returned. Intended to be used as
+ // CHECK(lock.IsHeld()).
+ inline bool IsHeld() const {
+ return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+ }
+
+ protected:
+ // These should not be exported except for testing.
+
+ // Store number of cycles between wait_start_time and wait_end_time in a
+ // lock value.
+ static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+ int64_t wait_end_time);
+
+ // Extract number of wait cycles in a lock value.
+ static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+ // Provide access to the protected methods above. Use for testing only.
+ friend struct SpinLockTest;
+
+ private:
+ // lockword_ is used to store the following:
+ //
+ // bit[0] encodes whether a lock is being held.
+ // bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether the current lock holder disabled scheduling when
// acquiring the lock. Only set when kSpinLockHeld is also set.
- // bit[3:31] encodes the lock holder's wait time as a 29-bit unsigned int.
+ // bit[3:31] encodes the lock holder's wait time as a 29-bit unsigned int.
// This is set by the lock holder to indicate how long it waited on
// the lock before eventually acquiring it. The number of cycles is
// encoded as a 29-bit unsigned int, or in the case that the current
@@ -160,89 +160,89 @@ class ABSL_LOCKABLE SpinLock {
// Includes kSpinLockSleeper.
static constexpr uint32_t kWaitTimeMask =
~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
-
- // Returns true if the provided scheduling mode is cooperative.
- static constexpr bool IsCooperative(
- base_internal::SchedulingMode scheduling_mode) {
- return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
- }
-
- uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
- void SlowLock() ABSL_ATTRIBUTE_COLD;
- void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
- uint32_t SpinLoop();
-
- inline bool TryLockImpl() {
- uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
- return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
- }
-
- std::atomic<uint32_t> lockword_;
-
- SpinLock(const SpinLock&) = delete;
- SpinLock& operator=(const SpinLock&) = delete;
-};
-
-// Corresponding locker object that arranges to acquire a spinlock for
-// the duration of a C++ scope.
-class ABSL_SCOPED_LOCKABLE SpinLockHolder {
- public:
- inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
- : lock_(l) {
- l->Lock();
- }
- inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
-
- SpinLockHolder(const SpinLockHolder&) = delete;
- SpinLockHolder& operator=(const SpinLockHolder&) = delete;
-
- private:
- SpinLock* lock_;
-};
-
-// Register a hook for profiling support.
-//
-// The function pointer registered here will be called whenever a spinlock is
-// contended. The callback is given an opaque handle to the contended spinlock
-// and the number of wait cycles. This is thread-safe, but only a single
-// profiler can be registered. It is an error to call this function multiple
-// times with different arguments.
-void RegisterSpinLockProfiler(void (*fn)(const void* lock,
- int64_t wait_cycles));
-
-//------------------------------------------------------------------------------
-// Public interface ends here.
-//------------------------------------------------------------------------------
-
-// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
-// Otherwise, returns last observed value for lockword_.
-inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
- uint32_t wait_cycles) {
- if ((lock_value & kSpinLockHeld) != 0) {
- return lock_value;
- }
-
- uint32_t sched_disabled_bit = 0;
- if ((lock_value & kSpinLockCooperative) == 0) {
- // For non-cooperative locks we must make sure we mark ourselves as
- // non-reschedulable before we attempt to CompareAndSwap.
- if (base_internal::SchedulingGuard::DisableRescheduling()) {
- sched_disabled_bit = kSpinLockDisabledScheduling;
- }
- }
-
- if (!lockword_.compare_exchange_strong(
- lock_value,
- kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
- }
-
- return lock_value;
-}
-
-} // namespace base_internal
+
+ // Returns true if the provided scheduling mode is cooperative.
+ static constexpr bool IsCooperative(
+ base_internal::SchedulingMode scheduling_mode) {
+ return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+ }
+
+ uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+ void SlowLock() ABSL_ATTRIBUTE_COLD;
+ void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+ uint32_t SpinLoop();
+
+ inline bool TryLockImpl() {
+ uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+ return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+ }
+
+ std::atomic<uint32_t> lockword_;
+
+ SpinLock(const SpinLock&) = delete;
+ SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+ inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+ : lock_(l) {
+ l->Lock();
+ }
+ inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+ SpinLockHolder(const SpinLockHolder&) = delete;
+ SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+ SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended. The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles. This is thread-safe, but only a single
+// profiler can be registered. It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+ int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+ uint32_t wait_cycles) {
+ if ((lock_value & kSpinLockHeld) != 0) {
+ return lock_value;
+ }
+
+ uint32_t sched_disabled_bit = 0;
+ if ((lock_value & kSpinLockCooperative) == 0) {
+ // For non-cooperative locks we must make sure we mark ourselves as
+ // non-reschedulable before we attempt to CompareAndSwap.
+ if (base_internal::SchedulingGuard::DisableRescheduling()) {
+ sched_disabled_bit = kSpinLockDisabledScheduling;
+ }
+ }
+
+ if (!lockword_.compare_exchange_strong(
+ lock_value,
+ kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+ }
+
+ return lock_value;
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_
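
A short usage sketch for the public pieces declared above: a statically
initialized SpinLock guarded via SpinLockHolder, plus registration of a
contention profiler (the globals and the callback names are illustrative):

    #include <cstdint>

    #include "absl/base/const_init.h"
    #include "absl/base/internal/spinlock.h"

    ABSL_CONST_INIT static absl::base_internal::SpinLock g_lock(
        absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
    static int64_t g_counter = 0;

    void Increment() {
      absl::base_internal::SpinLockHolder h(&g_lock);  // Lock() in the ctor
      ++g_counter;                                     // protected section
    }                                                  // Unlock() in the dtor

    // Invoked whenever a SpinLock acquisition was contended; must be
    // lightweight, and only a single profiler may ever be registered.
    void OnContention(const void* lock, int64_t wait_cycles) {}

    void InstallProfiler() {
      absl::base_internal::RegisterSpinLockProfiler(&OnContention);
    }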
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
index b83fd84870..7b0cada4f1 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_akaros.inc
@@ -1,35 +1,35 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file is an Akaros-specific part of spinlock_wait.cc
-
-#include <atomic>
-
-#include "absl/base/internal/scheduling_mode.h"
-
-extern "C" {
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
- int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
- // In Akaros, one must take care not to call anything that could cause a
- // malloc(), a blocking system call, or a uthread_yield() while holding a
- // spinlock. Our callers assume we will not call into libraries or other
- // arbitrary code.
-}
-
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+ int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+ // In Akaros, one must take care not to call anything that could cause a
+ // malloc(), a blocking system call, or a uthread_yield() while holding a
+ // spinlock. Our callers assume we will not call into libraries or other
+ // arbitrary code.
+}
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
-
-} // extern "C"
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
index f32a54a7ea..202f7cdfc8 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_linux.inc
@@ -1,51 +1,51 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file is a Linux-specific part of spinlock_wait.cc
-
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include <atomic>
-#include <climits>
-#include <cstdint>
-#include <ctime>
-
-#include "absl/base/attributes.h"
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
#include "absl/base/internal/errno_saver.h"
-
-// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
-// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
-// by SYS_futex. We also assume that reads/writes done to the lockword
-// by SYS_futex have rational semantics with regard to the
-// std::atomic<> API. C++ provides no guarantees of these assumptions,
-// but they are believed to hold in practice.
-static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
- "SpinLock lockword has the wrong size for a futex");
-
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#endif
-
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+ "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
#define SYS_futex_time64 __NR_futex_time64
#endif
@@ -54,21 +54,21 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
#define SYS_futex SYS_futex_time64
#endif
-extern "C" {
-
+extern "C" {
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t> *w, uint32_t value, int loop,
- absl::base_internal::SchedulingMode) {
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ absl::base_internal::SchedulingMode) {
absl::base_internal::ErrnoSaver errno_saver;
- struct timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
- syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
-}
-
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+ syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+}
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t> *w, bool all) {
- syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
-}
-
-} // extern "C"
+ syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
index bc16bd318e..4f6f887d99 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_posix.inc
@@ -1,46 +1,46 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file is a Posix-specific part of spinlock_wait.cc
-
-#include <sched.h>
-#include <atomic>
-#include <ctime>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
#include "absl/base/internal/errno_saver.h"
-#include "absl/base/internal/scheduling_mode.h"
-#include "absl/base/port.h"
-
-extern "C" {
-
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
- absl::base_internal::SchedulingMode /* mode */) {
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ absl::base_internal::SchedulingMode /* mode */) {
absl::base_internal::ErrnoSaver errno_saver;
- if (loop == 0) {
- } else if (loop == 1) {
- sched_yield();
- } else {
- struct timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
- nanosleep(&tm, nullptr);
- }
-}
-
+ if (loop == 0) {
+ } else if (loop == 1) {
+ sched_yield();
+ } else {
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+ nanosleep(&tm, nullptr);
+ }
+}
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
-
-} // extern "C"
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.cc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.cc
index 9adc42c11c..fa824be1c0 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.cc
@@ -1,81 +1,81 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The OS-specific header included below must provide two calls:
-// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
-// See spinlock_wait.h for the specs.
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/spinlock_wait.h"
-
-#if defined(_WIN32)
-#include "absl/base/internal/spinlock_win32.inc"
-#elif defined(__linux__)
-#include "absl/base/internal/spinlock_linux.inc"
-#elif defined(__akaros__)
-#include "absl/base/internal/spinlock_akaros.inc"
-#else
-#include "absl/base/internal/spinlock_posix.inc"
-#endif
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// See spinlock_wait.h for spec.
-uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
- const SpinLockWaitTransition trans[],
- base_internal::SchedulingMode scheduling_mode) {
- int loop = 0;
- for (;;) {
- uint32_t v = w->load(std::memory_order_acquire);
- int i;
- for (i = 0; i != n && v != trans[i].from; i++) {
- }
- if (i == n) {
- SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition
- } else if (trans[i].to == v || // null transition
- w->compare_exchange_strong(v, trans[i].to,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- if (trans[i].done) return v;
- }
- }
-}
-
-static std::atomic<uint64_t> delay_rand;
-
-// Return a suggested delay in nanoseconds for iteration number "loop"
-int SpinLockSuggestedDelayNS(int loop) {
- // Weak pseudo-random number generator to get some spread between threads
- // when many are spinning.
- uint64_t r = delay_rand.load(std::memory_order_relaxed);
- r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
- delay_rand.store(r, std::memory_order_relaxed);
-
- if (loop < 0 || loop > 32) { // limit loop to 0..32
- loop = 32;
- }
- const int kMinDelay = 128 << 10; // 128us
- // Double delay every 8 iterations, up to 16x (2ms).
- int delay = kMinDelay << (loop / 8);
- // Randomize in delay..2*delay range, for resulting 128us..4ms range.
- return delay | ((delay - 1) & static_cast<int>(r));
-}
-
-} // namespace base_internal
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+ const SpinLockWaitTransition trans[],
+ base_internal::SchedulingMode scheduling_mode) {
+ int loop = 0;
+ for (;;) {
+ uint32_t v = w->load(std::memory_order_acquire);
+ int i;
+ for (i = 0; i != n && v != trans[i].from; i++) {
+ }
+ if (i == n) {
+ SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition
+ } else if (trans[i].to == v || // null transition
+ w->compare_exchange_strong(v, trans[i].to,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if (trans[i].done) return v;
+ }
+ }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+ // Weak pseudo-random number generator to get some spread between threads
+ // when many are spinning.
+ uint64_t r = delay_rand.load(std::memory_order_relaxed);
+ r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
+ delay_rand.store(r, std::memory_order_relaxed);
+
+ if (loop < 0 || loop > 32) { // limit loop to 0..32
+ loop = 32;
+ }
+ const int kMinDelay = 128 << 10; // 128us
+ // Double delay every 8 iterations, up to 16x (2ms).
+ int delay = kMinDelay << (loop / 8);
+ // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+ return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
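
To make the backoff schedule above concrete, a small sketch that prints the
deterministic part of the suggested delay (the per-thread randomization is
skipped; the numbers match the comments: 128us doubled every 8 iterations,
capped at 2ms):

    #include <cstdio>

    int main() {
      const int kMinDelay = 128 << 10;  // ~128us expressed in ns, as above
      for (int loop : {0, 7, 8, 16, 24, 32, 40}) {
        const int clamped = (loop < 0 || loop > 32) ? 32 : loop;
        std::printf("loop=%2d -> base delay %d ns\n", clamped,
                    kMinDelay << (clamped / 8));
      }
      return 0;
    }

The randomization step then picks a value in the delay..2*delay range, which
is where the overall 128us..4ms figure in the comment comes from.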
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
index a66086318e..9a1adcda5e 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait.h
@@ -1,95 +1,95 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
-#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
-
-// Operations to make atomic transitions on a word, and to allow
-// waiting for those transitions to become possible.
-
-#include <stdint.h>
-#include <atomic>
-
-#include "absl/base/internal/scheduling_mode.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// SpinLockWait() waits until it can perform one of several transitions from
-// "from" to "to". It returns when it performs a transition where done==true.
-struct SpinLockWaitTransition {
- uint32_t from;
- uint32_t to;
- bool done;
-};
-
-// Wait until *w can transition from trans[i].from to trans[i].to for some i
-// satisfying 0<=i<n && trans[i].done, atomically make the transition,
-// then return the old value of *w. Make any other atomic transitions
-// where !trans[i].done, but continue waiting.
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to". It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+ uint32_t from;
+ uint32_t to;
+ bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w. Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
//
// Wakeups for threads blocked on SpinLockWait do not respect priorities.
-uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
- const SpinLockWaitTransition trans[],
- SchedulingMode scheduling_mode);
-
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+ const SpinLockWaitTransition trans[],
+ SchedulingMode scheduling_mode);
+
// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
// is true, wake all such threads. On some systems, this may be a no-op; on
// those systems, threads calling SpinLockDelay() will always wake eventually
// even if SpinLockWake() is never called.
-void SpinLockWake(std::atomic<uint32_t> *w, bool all);
-
-// Wait for an appropriate spin delay on iteration "loop" of a
-// spin loop on location *w, whose previously observed value was "value".
-// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a call to SpinLockWake(w).
-void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
- base_internal::SchedulingMode scheduling_mode);
-
-// Helper used by AbslInternalSpinLockDelay.
-// Returns a suggested delay in nanoseconds for iteration number "loop".
-int SpinLockSuggestedDelayNS(int loop);
-
-} // namespace base_internal
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+ base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker. This causes it to flag weak symbol overrides as ODR
-// violations. Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
bool all);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t> *w, uint32_t value, int loop,
- absl::base_internal::SchedulingMode scheduling_mode);
-}
-
-inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
- bool all) {
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+ bool all) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
-}
-
-inline void absl::base_internal::SpinLockDelay(
- std::atomic<uint32_t> *w, uint32_t value, int loop,
- absl::base_internal::SchedulingMode scheduling_mode) {
+}
+
+inline void absl::base_internal::SpinLockDelay(
+ std::atomic<uint32_t> *w, uint32_t value, int loop,
+ absl::base_internal::SchedulingMode scheduling_mode) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
(w, value, loop, scheduling_mode);
-}
-
-#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+}
+
+#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
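
As a usage illustration of the transition-table API declared in this header,
here is a hedged sketch of a toy binary lock. The 0 = free / 1 = held encoding
and the Toy* names are invented for the example; only the SpinLockWait(),
SpinLockWake(), and SCHEDULE_KERNEL_ONLY identifiers come from the headers
above.

// Sketch: waiting for the 0 -> 1 transition acquires the toy lock; done ==
// true makes SpinLockWait() return once that transition has been performed.
#include <atomic>

#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"

static std::atomic<uint32_t> lock_word{0};  // hypothetical: 0 = free, 1 = held

void ToyLock() {
  static const absl::base_internal::SpinLockWaitTransition trans[] = {
      {0, 1, true},  // free -> held, then return
  };
  absl::base_internal::SpinLockWait(&lock_word, 1, trans,
                                    absl::base_internal::SCHEDULE_KERNEL_ONLY);
}

void ToyUnlock() {
  lock_word.store(0, std::memory_order_release);
  absl::base_internal::SpinLockWake(&lock_word, false);  // wake some waiter
}
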
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait/ya.make b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait/ya.make
index 9374a06488..8eae89d717 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait/ya.make
@@ -1,29 +1,29 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
-
-SRCS(
- spinlock_wait.cc
-)
-
-END()
+SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
+
+SRCS(
+ spinlock_wait.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
index 8c2d61f31a..9d224813a5 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/spinlock_win32.inc
@@ -1,37 +1,37 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file is a Win32-specific part of spinlock_wait.cc
-
-#include <windows.h>
-#include <atomic>
-#include "absl/base/internal/scheduling_mode.h"
-
-extern "C" {
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
absl::base_internal::SchedulingMode /* mode */) {
- if (loop == 0) {
- } else if (loop == 1) {
- Sleep(0);
- } else {
- Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
- }
-}
-
+ if (loop == 0) {
+ } else if (loop == 1) {
+ Sleep(0);
+ } else {
+ Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+ }
+}
+
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
-
-} // extern "C"
+
+} // extern "C"
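
The Win32 delay above coarsens the nanosecond suggestion to the millisecond
granularity of Sleep(): iteration 0 spins, iteration 1 yields with Sleep(0),
and later iterations sleep for the suggested time rounded down to whole
milliseconds. A small sketch of that mapping (illustrative only, not from the
patch):

// Sketch: suggested nanosecond delays rounded onto Sleep()'s milliseconds.
// Sub-millisecond suggestions degrade to Sleep(0), i.e. a bare yield.
#include <cstdio>

int main() {
  const int suggested_ns[] = {131072, 1048576, 2097152, 4194304};
  for (int ns : suggested_ns) {
    std::printf("%8d ns -> Sleep(%d)\n", ns, ns / 1000000);
  }
  return 0;
}
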
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
index 350799e471..8c2e6c87fa 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.cc
@@ -1,66 +1,66 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/sysinfo.h"
-
-#include "absl/base/attributes.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <fcntl.h>
-#include <pthread.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <sys/syscall.h>
-#endif
-
-#if defined(__APPLE__) || defined(__FreeBSD__)
-#include <sys/sysctl.h>
-#endif
-
-#if defined(__myriad2__)
-#error #include <rtems.h>
-#endif
-
-#include <string.h>
-
-#include <cassert>
-#include <cstdint>
-#include <cstdio>
-#include <cstdlib>
-#include <ctime>
-#include <limits>
-#include <thread> // NOLINT(build/c++11)
-#include <utility>
-#include <vector>
-
-#include "absl/base/call_once.h"
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(__myriad2__)
+#error #include <rtems.h>
+#endif
+
+#include <string.h>
+
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread> // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/thread_annotations.h"
-
-namespace absl {
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
+namespace base_internal {
+
namespace {
#if defined(_WIN32)
@@ -125,25 +125,25 @@ int Win32NumCPUs() {
} // namespace
-static int GetNumCPUs() {
-#if defined(__myriad2__)
- return 1;
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+ return 1;
#elif defined(_WIN32)
const unsigned hardware_concurrency = Win32NumCPUs();
return hardware_concurrency ? hardware_concurrency : 1;
#elif defined(_AIX)
return sysconf(_SC_NPROCESSORS_ONLN);
-#else
- // Other possibilities:
- // - Read /sys/devices/system/cpu/online and use cpumask_parse()
- // - sysconf(_SC_NPROCESSORS_ONLN)
- return std::thread::hardware_concurrency();
-#endif
-}
-
-#if defined(_WIN32)
-
-static double GetNominalCPUFrequency() {
+#else
+ // Other possibilities:
+ // - Read /sys/devices/system/cpu/online and use cpumask_parse()
+ // - sysconf(_SC_NPROCESSORS_ONLN)
+ return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
// UWP apps don't have access to the registry and currently don't provide an
@@ -167,330 +167,330 @@ static double GetNominalCPUFrequency() {
data_size == sizeof(data)) {
return data * 1e6; // Value is MHz.
}
- }
- return 1.0;
+ }
+ return 1.0;
#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
-}
-
-#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
-
-static double GetNominalCPUFrequency() {
- unsigned freq;
- size_t size = sizeof(freq);
- int mib[2] = {CTL_HW, HW_CPU_FREQ};
- if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
- return static_cast<double>(freq);
- }
- return 1.0;
-}
-
-#else
-
-// Helper function for reading a long from a file. Returns true if successful
-// and the memory location pointed to by value is set to the value read.
-static bool ReadLongFromFile(const char *file, long *value) {
- bool ret = false;
- int fd = open(file, O_RDONLY);
- if (fd != -1) {
- char line[1024];
- char *err;
- memset(line, '\0', sizeof(line));
- int len = read(fd, line, sizeof(line) - 1);
- if (len <= 0) {
- ret = false;
- } else {
- const long temp_value = strtol(line, &err, 10);
- if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
- *value = temp_value;
- ret = true;
- }
- }
- close(fd);
- }
- return ret;
-}
-
-#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
-
-// Reads a monotonic time source and returns a value in
-// nanoseconds. The returned value uses an arbitrary epoch, not the
-// Unix epoch.
-static int64_t ReadMonotonicClockNanos() {
- struct timespec t;
-#ifdef CLOCK_MONOTONIC_RAW
- int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
-#else
- int rc = clock_gettime(CLOCK_MONOTONIC, &t);
-#endif
- if (rc != 0) {
- perror("clock_gettime() failed");
- abort();
- }
- return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
-}
-
-class UnscaledCycleClockWrapperForInitializeFrequency {
- public:
- static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
-};
-
-struct TimeTscPair {
- int64_t time; // From ReadMonotonicClockNanos().
- int64_t tsc; // From UnscaledCycleClock::Now().
-};
-
-// Returns a pair of values (monotonic kernel time, TSC ticks) that
-// approximately correspond to each other. This is accomplished by
-// doing several reads and picking the reading with the lowest
-// latency. This approach is used to minimize the probability that
-// our thread was preempted between clock reads.
-static TimeTscPair GetTimeTscPair() {
- int64_t best_latency = std::numeric_limits<int64_t>::max();
- TimeTscPair best;
- for (int i = 0; i < 10; ++i) {
- int64_t t0 = ReadMonotonicClockNanos();
- int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
- int64_t t1 = ReadMonotonicClockNanos();
- int64_t latency = t1 - t0;
- if (latency < best_latency) {
- best_latency = latency;
- best.time = t0;
- best.tsc = tsc;
- }
- }
- return best;
-}
-
-// Measures and returns the TSC frequency by taking a pair of
-// measurements approximately `sleep_nanoseconds` apart.
-static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
- auto t0 = GetTimeTscPair();
- struct timespec ts;
- ts.tv_sec = 0;
- ts.tv_nsec = sleep_nanoseconds;
- while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
- auto t1 = GetTimeTscPair();
- double elapsed_ticks = t1.tsc - t0.tsc;
- double elapsed_time = (t1.time - t0.time) * 1e-9;
- return elapsed_ticks / elapsed_time;
-}
-
-// Measures and returns the TSC frequency by calling
-// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
-// frequency measurement stabilizes.
-static double MeasureTscFrequency() {
- double last_measurement = -1.0;
- int sleep_nanoseconds = 1000000; // 1 millisecond.
- for (int i = 0; i < 8; ++i) {
- double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
- if (measurement * 0.99 < last_measurement &&
- last_measurement < measurement * 1.01) {
- // Use the current measurement if it is within 1% of the
- // previous measurement.
- return measurement;
- }
- last_measurement = measurement;
- sleep_nanoseconds *= 2;
- }
- return last_measurement;
-}
-
-#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-
-static double GetNominalCPUFrequency() {
- long freq = 0;
-
- // Google's production kernel has a patch to export the TSC
- // frequency through sysfs. If the kernel is exporting the TSC
- // frequency use that. There are issues where cpuinfo_max_freq
- // cannot be relied on because the BIOS may be exporting an invalid
- // p-state (on x86) or p-states may be used to put the processor in
- // a new mode (turbo mode). Essentially, those frequencies cannot
- // always be relied upon. The same reasons apply to /proc/cpuinfo as
- // well.
- if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
- return freq * 1e3; // Value is kHz.
- }
-
-#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
- // On these platforms, the TSC frequency is the nominal CPU
- // frequency. But without having the kernel export it directly
- // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
- // other way to reliably get the TSC frequency, so we have to
- // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by
- // exporting "fake" frequencies for implementing new features. For
- // example, Intel's turbo mode is enabled by exposing a p-state
- // value with a higher frequency than that of the real TSC
- // rate. Because of this, we prefer to measure the TSC rate
- // ourselves on i386 and x86-64.
- return MeasureTscFrequency();
-#else
-
- // If CPU scaling is in effect, we want to use the *maximum*
- // frequency, not whatever CPU speed some random processor happens
- // to be using now.
- if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
- &freq)) {
- return freq * 1e3; // Value is kHz.
- }
-
- return 1.0;
-#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-}
-
-#endif
-
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+ unsigned freq;
+ size_t size = sizeof(freq);
+ int mib[2] = {CTL_HW, HW_CPU_FREQ};
+ if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+ return static_cast<double>(freq);
+ }
+ return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+ bool ret = false;
+ int fd = open(file, O_RDONLY);
+ if (fd != -1) {
+ char line[1024];
+ char *err;
+ memset(line, '\0', sizeof(line));
+ int len = read(fd, line, sizeof(line) - 1);
+ if (len <= 0) {
+ ret = false;
+ } else {
+ const long temp_value = strtol(line, &err, 10);
+ if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+ *value = temp_value;
+ ret = true;
+ }
+ }
+ close(fd);
+ }
+ return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+ struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+ int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+ int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+ if (rc != 0) {
+ perror("clock_gettime() failed");
+ abort();
+ }
+ return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+ static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+ int64_t time; // From ReadMonotonicClockNanos().
+ int64_t tsc; // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other. This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency. This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+ int64_t best_latency = std::numeric_limits<int64_t>::max();
+ TimeTscPair best;
+ for (int i = 0; i < 10; ++i) {
+ int64_t t0 = ReadMonotonicClockNanos();
+ int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+ int64_t t1 = ReadMonotonicClockNanos();
+ int64_t latency = t1 - t0;
+ if (latency < best_latency) {
+ best_latency = latency;
+ best.time = t0;
+ best.tsc = tsc;
+ }
+ }
+ return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+ auto t0 = GetTimeTscPair();
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = sleep_nanoseconds;
+ while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+ auto t1 = GetTimeTscPair();
+ double elapsed_ticks = t1.tsc - t0.tsc;
+ double elapsed_time = (t1.time - t0.time) * 1e-9;
+ return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+ double last_measurement = -1.0;
+ int sleep_nanoseconds = 1000000; // 1 millisecond.
+ for (int i = 0; i < 8; ++i) {
+ double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+ if (measurement * 0.99 < last_measurement &&
+ last_measurement < measurement * 1.01) {
+ // Use the current measurement if it is within 1% of the
+ // previous measurement.
+ return measurement;
+ }
+ last_measurement = measurement;
+ sleep_nanoseconds *= 2;
+ }
+ return last_measurement;
+}
+
+#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+ long freq = 0;
+
+ // Google's production kernel has a patch to export the TSC
+ // frequency through sysfs. If the kernel is exporting the TSC
+ // frequency use that. There are issues where cpuinfo_max_freq
+ // cannot be relied on because the BIOS may be exporting an invalid
+ // p-state (on x86) or p-states may be used to put the processor in
+ // a new mode (turbo mode). Essentially, those frequencies cannot
+ // always be relied upon. The same reasons apply to /proc/cpuinfo as
+ // well.
+ if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+ return freq * 1e3; // Value is kHz.
+ }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+ // On these platforms, the TSC frequency is the nominal CPU
+ // frequency. But without having the kernel export it directly
+ // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+ // other way to reliably get the TSC frequency, so we have to
+ // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by
+ // exporting "fake" frequencies for implementing new features. For
+ // example, Intel's turbo mode is enabled by exposing a p-state
+ // value with a higher frequency than that of the real TSC
+ // rate. Because of this, we prefer to measure the TSC rate
+ // ourselves on i386 and x86-64.
+ return MeasureTscFrequency();
+#else
+
+ // If CPU scaling is in effect, we want to use the *maximum*
+ // frequency, not whatever CPU speed some random processor happens
+ // to be using now.
+ if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &freq)) {
+ return freq * 1e3; // Value is kHz.
+ }
+
+ return 1.0;
+#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
ABSL_CONST_INIT static once_flag init_num_cpus_once;
ABSL_CONST_INIT static int num_cpus = 0;
-
+
// NumCPUs() may be called before main() and before malloc is properly
// initialized, therefore this must not allocate memory.
-int NumCPUs() {
+int NumCPUs() {
base_internal::LowLevelCallOnce(
&init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
- return num_cpus;
-}
-
+ return num_cpus;
+}
+
// A default frequency of 0.0 might be dangerous if it is used in division.
ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
// NominalCPUFrequency() may be called before main() and before malloc is
// properly initialized, therefore this must not allocate memory.
-double NominalCPUFrequency() {
+double NominalCPUFrequency() {
base_internal::LowLevelCallOnce(
&init_nominal_cpu_frequency_once,
[]() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
- return nominal_cpu_frequency;
-}
-
-#if defined(_WIN32)
-
-pid_t GetTID() {
+ return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
return pid_t{GetCurrentThreadId()};
-}
-
-#elif defined(__linux__)
-
-#ifndef SYS_gettid
-#define SYS_gettid __NR_gettid
-#endif
-
-pid_t GetTID() {
- return syscall(SYS_gettid);
-}
-
-#elif defined(__akaros__)
-
-pid_t GetTID() {
- // Akaros has a concept of "vcore context", which is the state the program
- // is forced into when we need to make a user-level scheduling decision, or
- // run a signal handler. This is analogous to the interrupt context that a
- // CPU might enter if it encounters some kind of exception.
- //
- // There is no current thread context in vcore context, but we need to give
- // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
- // Thread 0 always exists, so if we are in vcore context, we return that.
- //
- // Otherwise, we know (since we are using pthreads) that the uthread struct
- // current_uthread is pointing to is the first element of a
- // struct pthread_tcb, so we extract and return the thread ID from that.
- //
- // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
- // structure at some point. We should modify this code to remove the cast
- // when that happens.
- if (in_vcore_context())
- return 0;
- return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
-}
-
-#elif defined(__myriad2__)
-
-pid_t GetTID() {
- uint32_t tid;
- rtems_task_ident(RTEMS_SELF, 0, &tid);
- return tid;
-}
-
-#else
-
-// Fallback implementation of GetTID using pthread_getspecific.
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+ return syscall(SYS_gettid);
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+ // Akaros has a concept of "vcore context", which is the state the program
+ // is forced into when we need to make a user-level scheduling decision, or
+ // run a signal handler. This is analogous to the interrupt context that a
+ // CPU might enter if it encounters some kind of exception.
+ //
+ // There is no current thread context in vcore context, but we need to give
+ // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+ // Thread 0 always exists, so if we are in vcore context, we return that.
+ //
+ // Otherwise, we know (since we are using pthreads) that the uthread struct
+ // current_uthread is pointing to is the first element of a
+ // struct pthread_tcb, so we extract and return the thread ID from that.
+ //
+ // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+ // structure at some point. We should modify this code to remove the cast
+ // when that happens.
+ if (in_vcore_context())
+ return 0;
+ return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+ uint32_t tid;
+ rtems_task_ident(RTEMS_SELF, 0, &tid);
+ return tid;
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
ABSL_CONST_INIT static once_flag tid_once;
ABSL_CONST_INIT static pthread_key_t tid_key;
ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// We set a bit per thread in this array to indicate that an ID is in
-// use. ID 0 is unused because it is the default value returned by
-// pthread_getspecific().
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
ABSL_GUARDED_BY(tid_lock) = nullptr;
-static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
-
-// Returns the TID to tid_array.
-static void FreeTID(void *v) {
- intptr_t tid = reinterpret_cast<intptr_t>(v);
- int word = tid / kBitsPerWord;
- uint32_t mask = ~(1u << (tid % kBitsPerWord));
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
- (*tid_array)[word] &= mask;
-}
-
-static void InitGetTID() {
- if (pthread_key_create(&tid_key, FreeTID) != 0) {
- // The logging system calls GetTID() so it can't be used here.
- perror("pthread_key_create failed");
- abort();
- }
-
- // Initialize tid_array.
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- tid_array = new std::vector<uint32_t>(1);
- (*tid_array)[0] = 1; // ID 0 is never-allocated.
-}
-
-// Return a per-thread small integer ID from pthread's thread-specific data.
-pid_t GetTID() {
- absl::call_once(tid_once, InitGetTID);
-
- intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
- if (tid != 0) {
- return tid;
- }
-
- int bit; // tid_array[word] = 1u << bit;
- size_t word;
- {
- // Search for the first unused ID.
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- // First search for a word in the array that is not all ones.
- word = 0;
- while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
- ++word;
- }
- if (word == tid_array->size()) {
- tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
- }
- // Search for a zero bit in the word.
- bit = 0;
- while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
- ++bit;
- }
- tid = (word * kBitsPerWord) + bit;
- (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
- }
-
- if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
- perror("pthread_setspecific failed");
- abort();
- }
-
- return static_cast<pid_t>(tid);
-}
-
-#endif
-
+static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+ intptr_t tid = reinterpret_cast<intptr_t>(v);
+ int word = tid / kBitsPerWord;
+ uint32_t mask = ~(1u << (tid % kBitsPerWord));
+ absl::base_internal::SpinLockHolder lock(&tid_lock);
+ assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+ (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+ if (pthread_key_create(&tid_key, FreeTID) != 0) {
+ // The logging system calls GetTID() so it can't be used here.
+ perror("pthread_key_create failed");
+ abort();
+ }
+
+ // Initialize tid_array.
+ absl::base_internal::SpinLockHolder lock(&tid_lock);
+ tid_array = new std::vector<uint32_t>(1);
+ (*tid_array)[0] = 1; // ID 0 is never-allocated.
+}
+
+// Return a per-thread small integer ID from pthread's thread-specific data.
+pid_t GetTID() {
+ absl::call_once(tid_once, InitGetTID);
+
+ intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+ if (tid != 0) {
+ return tid;
+ }
+
+ int bit; // tid_array[word] = 1u << bit;
+ size_t word;
+ {
+ // Search for the first unused ID.
+ absl::base_internal::SpinLockHolder lock(&tid_lock);
+ // First search for a word in the array that is not all ones.
+ word = 0;
+ while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+ ++word;
+ }
+ if (word == tid_array->size()) {
+ tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
+ }
+ // Search for a zero bit in the word.
+ bit = 0;
+ while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+ ++bit;
+ }
+ tid = (word * kBitsPerWord) + bit;
+ (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
+ }
+
+ if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+ perror("pthread_setspecific failed");
+ abort();
+ }
+
+ return static_cast<pid_t>(tid);
+}
+
+#endif
+
// GetCachedTID() caches the thread ID in thread-local storage (which is a
// userspace construct) to avoid unnecessary system calls. Without this caching,
// it can take roughly 98ns, while it takes roughly 1ns with this caching.
@@ -503,6 +503,6 @@ pid_t GetCachedTID() {
#endif // ABSL_HAVE_THREAD_LOCAL
}
-} // namespace base_internal
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
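
MeasureTscFrequency() in the hunk above accepts a reading only once it agrees
with the previous one to within 1%, doubling the sleep interval between
attempts. The sketch below isolates that convergence rule; measure() is a
hypothetical stand-in for MeasureTscFrequencyWithSleep() and is not from the
patch.

// Sketch: the stabilization loop of MeasureTscFrequency(), with the actual
// clock/TSC sampling replaced by a synthetic reading that converges on 3 GHz.
#include <cstdio>

static double measure(int sleep_ns) {
  // Hypothetical noisy reading: error shrinks as the sleep interval grows.
  return 3.0e9 + 1.0e7 / (sleep_ns / 1000000);
}

int main() {
  double last = -1.0;
  int sleep_ns = 1000000;  // 1 millisecond, doubled each attempt
  for (int i = 0; i < 8; ++i) {
    double m = measure(sleep_ns);
    if (m * 0.99 < last && last < m * 1.01) {  // within 1% of previous reading
      std::printf("converged at %.0f Hz after %d attempts\n", m, i + 1);
      return 0;
    }
    last = m;
    sleep_ns *= 2;
  }
  std::printf("no convergence; last reading %.0f Hz\n", last);
  return 0;
}
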
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.h b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.h
index f35695a2ab..119cf1f0e8 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/sysinfo.h
@@ -1,65 +1,65 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file includes routines to find out characteristics
-// of the machine a program is running on. It is undoubtedly
-// system-dependent.
-
-// Functions listed here that accept a pid_t as an argument act on the
-// current process if the pid_t argument is 0.
-// All functions here are thread-hostile due to file caching unless
-// commented otherwise.
-
-#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
-#define ABSL_BASE_INTERNAL_SYSINFO_H_
-
-#ifndef _WIN32
-#include <sys/types.h>
-#endif
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on. It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
#include <cstdint>
#include "absl/base/config.h"
-#include "absl/base/port.h"
-
-namespace absl {
+#include "absl/base/port.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// Nominal core processor cycles per second of each processor. This is _not_
-// necessarily the frequency of the CycleClock counter (see cycleclock.h).
-// Thread-safe.
-double NominalCPUFrequency();
-
-// Number of logical processors (hyperthreads) in system. Thread-safe.
-int NumCPUs();
-
-// Return the thread id of the current thread, as told by the system.
-// No two currently-live threads implemented by the OS shall have the same ID.
-// Thread ids of exited threads may be reused. Multiple user-level threads
-// may have the same thread ID if multiplexed on the same OS thread.
-//
-// On Linux, you may send a signal to the resulting ID with kill(). However,
-// it is recommended for portability that you use pthread_kill() instead.
-#ifdef _WIN32
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor. This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused. Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill(). However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
// On Windows, process id and thread id are of the same type: the return
// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
// 32-bit type.
using pid_t = uint32_t;
-#endif
-pid_t GetTID();
-
+#endif
+pid_t GetTID();
+
// Like GetTID(), but caches the result in thread-local storage in order
// to avoid unnecessary system calls. Note that there are some cases where
// one must call through to GetTID directly, which is why this exists as a
@@ -67,8 +67,8 @@ pid_t GetTID();
// an asynchronous signal-handling context nor right after a call to fork().
pid_t GetCachedTID();
-} // namespace base_internal
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_SYSINFO_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SYSINFO_H_
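
A brief usage sketch of the three entry points this header declares. These are
absl internal APIs, so the example is illustrative rather than a supported
usage pattern; the printed values are of course machine-dependent.

// Sketch: querying machine characteristics through sysinfo.h.
#include <cstdio>

#include "absl/base/internal/sysinfo.h"

int main() {
  std::printf("logical CPUs:      %d\n", absl::base_internal::NumCPUs());
  std::printf("nominal frequency: %.0f Hz\n",
              absl::base_internal::NominalCPUFrequency());
  std::printf("current thread id: %ld\n",
              static_cast<long>(absl::base_internal::GetTID()));
  return 0;
}
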
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/thread_annotations.h b/contrib/restricted/abseil-cpp/absl/base/internal/thread_annotations.h
index c74a34a139..4dab6a9c15 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/thread_annotations.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/thread_annotations.h
@@ -1,271 +1,271 @@
-// Copyright 2019 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// File: thread_annotations.h
-// -----------------------------------------------------------------------------
-//
-// WARNING: This is a backwards compatible header and it will be removed after
-// the migration to prefixed thread annotations is finished; please include
-// "absl/base/thread_annotations.h".
-//
-// This header file contains macro definitions for thread safety annotations
-// that allow developers to document the locking policies of multi-threaded
-// code. The annotations can also help program analysis tools to identify
-// potential thread safety issues.
-//
-// These annotations are implemented using compiler attributes. Using the macros
-// defined here instead of raw attributes allows for portability and future
-// compatibility.
-//
-// When referring to mutexes in the arguments of the attributes, you should
-// use variable names or more complex expressions (e.g. my_object->mutex_)
-// that evaluate to a concrete mutex object whenever possible. If the mutex
-// you want to refer to is not in scope, you may use a member pointer
-// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
-
-#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
-#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
-
-#if defined(__clang__)
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
-#else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
-#endif
-
-// GUARDED_BY()
-//
-// Documents if a shared field or global variable needs to be protected by a
-// mutex. GUARDED_BY() allows the user to specify a particular mutex that
-// should be held when accessing the annotated variable.
-//
-// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
-// local variables, a local variable and its associated mutex can often be
-// combined into a small class or struct, thereby allowing the annotation.
-//
-// Example:
-//
-// class Foo {
-// Mutex mu_;
-// int p1_ GUARDED_BY(mu_);
-// ...
-// };
-#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-
-// PT_GUARDED_BY()
-//
-// Documents if the memory location pointed to by a pointer should be guarded
-// by a mutex when dereferencing the pointer.
-//
-// Example:
-// class Foo {
-// Mutex mu_;
-// int *p1_ PT_GUARDED_BY(mu_);
-// ...
-// };
-//
-// Note that a pointer variable to a shared memory location could itself be a
-// shared variable.
-//
-// Example:
-//
-// // `q_`, guarded by `mu1_`, points to a shared memory location that is
-// // guarded by `mu2_`:
-// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
-
-// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
-//
-// Documents the acquisition order between locks that can be held
-// simultaneously by a thread. For any two locks that need to be annotated
-// to establish an acquisition order, only one of them needs the annotation.
-// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
-// and ACQUIRED_BEFORE.)
-//
-// As with GUARDED_BY, this is only applicable to mutexes that are shared
-// fields or global variables.
-//
-// Example:
-//
-// Mutex m1_;
-// Mutex m2_ ACQUIRED_AFTER(m1_);
-#define ACQUIRED_AFTER(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
-
-#define ACQUIRED_BEFORE(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-
-// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
-//
-// Documents a function that expects a mutex to be held prior to entry.
-// The mutex is expected to be held both on entry to, and exit from, the
-// function.
-//
-// An exclusive lock allows read-write access to the guarded data member(s), and
-// only one thread can acquire a lock exclusively at any one time. A shared lock
-// allows read-only access, and any number of threads can acquire a shared lock
-// concurrently.
-//
-// Generally, non-const methods should be annotated with
-// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
-// SHARED_LOCKS_REQUIRED.
-//
-// Example:
-//
-// Mutex mu1, mu2;
-// int a GUARDED_BY(mu1);
-// int b GUARDED_BY(mu2);
-//
-// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
-// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
-#define EXCLUSIVE_LOCKS_REQUIRED(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
-
-#define SHARED_LOCKS_REQUIRED(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
-
-// LOCKS_EXCLUDED()
-//
-// Documents the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as Abseil's `Mutex` locks are
-// non-reentrant).
-#define LOCKS_EXCLUDED(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
-
-// LOCK_RETURNED()
-//
-// Documents a function that returns a mutex without acquiring it. For example,
-// a public getter method that returns a pointer to a private mutex should
-// be annotated with LOCK_RETURNED.
-#define LOCK_RETURNED(x) \
- THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-
-// LOCKABLE
-//
-// Documents if a class/type is a lockable type (such as the `Mutex` class).
-#define LOCKABLE \
- THREAD_ANNOTATION_ATTRIBUTE__(lockable)
-
-// SCOPED_LOCKABLE
-//
-// Documents if a class does RAII locking (such as the `MutexLock` class).
-// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
-// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
-// arguments; the analysis will assume that the destructor unlocks whatever the
-// constructor locked.
-#define SCOPED_LOCKABLE \
- THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-
-// EXCLUSIVE_LOCK_FUNCTION()
-//
-// Documents functions that acquire a lock in the body of a function, and do
-// not release it.
-#define EXCLUSIVE_LOCK_FUNCTION(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
-
-// SHARED_LOCK_FUNCTION()
-//
-// Documents functions that acquire a shared (reader) lock in the body of a
-// function, and do not release it.
-#define SHARED_LOCK_FUNCTION(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
-
-// UNLOCK_FUNCTION()
-//
-// Documents functions that expect a lock to be held on entry to the function,
-// and release it in the body of the function.
-#define UNLOCK_FUNCTION(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
-
-// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
-//
-// Documents functions that try to acquire a lock, and return success or failure
-// (or a non-boolean value that can be interpreted as a boolean).
-// The first argument should be `true` for functions that return `true` on
-// success, or `false` for functions that return `false` on success. The second
-// argument specifies the mutex that is locked on success. If unspecified, this
-// mutex is assumed to be `this`.
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
-
-#define SHARED_TRYLOCK_FUNCTION(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
-
-// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
-//
-// Documents functions that dynamically check to see if a lock is held, and fail
-// if it is not held.
-#define ASSERT_EXCLUSIVE_LOCK(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
-
-#define ASSERT_SHARED_LOCK(...) \
- THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
-
-// NO_THREAD_SAFETY_ANALYSIS
-//
-// Turns off thread safety checking within the body of a particular function.
-// This annotation is used to mark functions that are known to be correct, but
-// the locking behavior is more complicated than the analyzer can handle.
-#define NO_THREAD_SAFETY_ANALYSIS \
- THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
-
-//------------------------------------------------------------------------------
-// Tool-Supplied Annotations
-//------------------------------------------------------------------------------
-
-// TS_UNCHECKED should be placed around lock expressions that are not valid
-// C++ syntax, but which are present for documentation purposes. These
-// annotations will be ignored by the analysis.
-#define TS_UNCHECKED(x) ""
-
-// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
-// It is used by automated tools to mark and disable invalid expressions.
-// The annotation should either be fixed, or changed to TS_UNCHECKED.
-#define TS_FIXME(x) ""
-
-// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
-// a particular function. However, this attribute is used to mark functions
-// that are incorrect and need to be fixed. It is used by automated tools to
-// avoid breaking the build when the analysis is updated.
-// Code owners are expected to eventually fix the routine.
-#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
-
-// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
-// annotation that needs to be fixed, because it is producing thread safety
-// warnings. It disables the GUARDED_BY.
-#define GUARDED_BY_FIXME(x)
-
-// Disables warnings for a single read operation. This can be used to avoid
-// warnings when it is known that the read is not actually involved in a race,
-// but the compiler cannot confirm that.
-#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
-
-
-namespace thread_safety_analysis {
-
-// Takes a reference to a guarded data member, and returns an unguarded
-// reference.
-template <typename T>
-inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
- return v;
-}
-
-template <typename T>
-inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
- return v;
-}
-
-} // namespace thread_safety_analysis
-
-#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// WARNING: This is a backwards compatible header and it will be removed after
+// the migration to prefixed thread annotations is finished; please include
+// "absl/base/thread_annotations.h".
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
+// local variables, a local variable and its associated mutex can often be
+// combined into a small class or struct, thereby allowing the annotation.
+//
+// Example:
+//
+// class Foo {
+// Mutex mu_;
+// int p1_ GUARDED_BY(mu_);
+// ...
+// };
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+// class Foo {
+// Mutex mu_;
+// int *p1_ PT_GUARDED_BY(mu_);
+// ...
+// };
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+// // `q_`, guarded by `mu1_`, points to a shared memory location that is
+// // guarded by `mu2_`:
+// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// As with GUARDED_BY, this is only applicable to mutexes that are shared
+// fields or global variables.
+//
+// Example:
+//
+// Mutex m1_;
+// Mutex m2_ ACQUIRED_AFTER(m1_);
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// An exclusive lock allows read-write access to the guarded data member(s), and
+// only one thread can acquire a lock exclusively at any one time. A shared lock
+// allows read-only access, and any number of threads can acquire a shared lock
+// concurrently.
+//
+// Generally, non-const methods should be annotated with
+// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
+// SHARED_LOCKS_REQUIRED.
+//
+// Example:
+//
+// Mutex mu1, mu2;
+// int a GUARDED_BY(mu1);
+// int b GUARDED_BY(mu2);
+//
+// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
+// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) \
+ THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// whose locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing a thread safety
+// warning. It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+} // namespace thread_safety_analysis
+
+#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
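
The annotations above are easiest to see working together. Below is a minimal sketch (not part of this patch) of a hypothetical lockable type, a scoped RAII locker, and a guarded counter, assuming Clang's -Wthread-safety analysis is enabled; all class names here are illustrative.

    #include <atomic>

    class LOCKABLE SimpleMutex {
     public:
      void Lock() EXCLUSIVE_LOCK_FUNCTION() {
        while (flag_.exchange(true, std::memory_order_acquire)) {
        }
      }
      void Unlock() UNLOCK_FUNCTION() {
        flag_.store(false, std::memory_order_release);
      }
      bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
        return !flag_.exchange(true, std::memory_order_acquire);
      }

     private:
      std::atomic<bool> flag_{false};
    };

    // RAII wrapper: per SCOPED_LOCKABLE, the analysis assumes the destructor
    // releases whatever the constructor acquired.
    class SCOPED_LOCKABLE SimpleMutexLock {
     public:
      explicit SimpleMutexLock(SimpleMutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu)
          : mu_(mu) {
        mu_->Lock();
      }
      ~SimpleMutexLock() UNLOCK_FUNCTION() { mu_->Unlock(); }

     private:
      SimpleMutex* mu_;
    };

    class Counter {
     public:
      // Callers must not already hold mu_; the method acquires it itself.
      void Increment() LOCKS_EXCLUDED(mu_) {
        SimpleMutexLock l(&mu_);
        ++value_;  // OK: mu_ is held here, satisfying GUARDED_BY(mu_).
      }

     private:
      SimpleMutex mu_;
      int value_ GUARDED_BY(mu_) = 0;
    };
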
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
index 856b90e640..9950e63a79 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.cc
@@ -1,63 +1,63 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/thread_identity.h"
-
-#ifndef _WIN32
-#include <pthread.h>
-#include <signal.h>
-#endif
-
-#include <atomic>
-#include <cassert>
-#include <memory>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
#include "absl/base/attributes.h"
-#include "absl/base/call_once.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-
-namespace absl {
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-namespace {
-// Used to co-ordinate one-time creation of our pthread_key
-absl::once_flag init_thread_identity_key_once;
-pthread_key_t thread_identity_pthread_key;
-std::atomic<bool> pthread_key_initialized(false);
-
-void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
- pthread_key_create(&thread_identity_pthread_key, reclaimer);
- pthread_key_initialized.store(true, std::memory_order_release);
-}
-} // namespace
-#endif
-
-#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
- ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-// The actual TLS storage for a thread's currently associated ThreadIdentity.
-// This is referenced by inline accessors in the header.
-// "protected" visibility ensures that if multiple instances of Abseil code
-// exist within a process (via dlopen() or similar), references to
-// thread_identity_ptr from each instance of the code will refer to
-// *different* instances of this ptr.
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to co-ordinate one-time creation of our pthread_key
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+ pthread_key_create(&thread_identity_pthread_key, reclaimer);
+ pthread_key_initialized.store(true, std::memory_order_release);
+}
+} // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
// Apple platforms have the visibility attribute, but issue a compile warning
// that protected visibility is unsupported.
#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
-__attribute__((visibility("protected")))
+__attribute__((visibility("protected")))
#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
@@ -65,57 +65,57 @@ ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL)
thread_local ThreadIdentity* thread_identity_ptr = nullptr;
#endif // ABSL_PER_THREAD_TLS
-#endif // TLS or CPP11
-
-void SetCurrentThreadIdentity(
- ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
- assert(CurrentThreadIdentityIfPresent() == nullptr);
- // Associate our destructor.
- // NOTE: This call to pthread_setspecific is currently the only immovable
- // barrier to CurrentThreadIdentity() always being async signal safe.
-#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
- // NOTE: Not async-safe. But can be open-coded.
- absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
- reclaimer);
-
+#endif // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+ ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+ assert(CurrentThreadIdentityIfPresent() == nullptr);
+ // Associate our destructor.
+ // NOTE: This call to pthread_setspecific is currently the only immovable
+ // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ // NOTE: Not async-safe. But can be open-coded.
+ absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+ reclaimer);
+
#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
 // Emscripten and MinGW pthread implementations do not support signals.
- // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
- // for more information.
- pthread_setspecific(thread_identity_pthread_key,
- reinterpret_cast<void*>(identity));
-#else
- // We must mask signals around the call to setspecific because, with current
- // glibc, a concurrent getspecific (needed for
- // GetCurrentThreadIdentityIfPresent()) may zero our value.
- //
- // While not officially async-signal safe, getspecific within a signal handler
- // is otherwise OK.
- sigset_t all_signals;
- sigset_t curr_signals;
- sigfillset(&all_signals);
- pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
- pthread_setspecific(thread_identity_pthread_key,
- reinterpret_cast<void*>(identity));
- pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+ // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+ // for more information.
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+#else
+ // We must mask signals around the call to setspecific because, with current
+ // glibc, a concurrent getspecific (needed for
+ // GetCurrentThreadIdentityIfPresent()) may zero our value.
+ //
+ // While not officially async-signal safe, getspecific within a signal handler
+ // is otherwise OK.
+ sigset_t all_signals;
+ sigset_t curr_signals;
+ sigfillset(&all_signals);
+ pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+ pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
#endif // !__EMSCRIPTEN__ && !__MINGW32__
-
-#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
- // NOTE: Not async-safe. But can be open-coded.
- absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
- reclaimer);
- pthread_setspecific(thread_identity_pthread_key,
- reinterpret_cast<void*>(identity));
- thread_identity_ptr = identity;
-#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
- thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
- holder(identity, reclaimer);
- thread_identity_ptr = identity;
-#else
-#error Unimplemented ABSL_THREAD_IDENTITY_MODE
-#endif
-}
-
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+ // NOTE: Not async-safe. But can be open-coded.
+ absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+ reclaimer);
+ pthread_setspecific(thread_identity_pthread_key,
+ reinterpret_cast<void*>(identity));
+ thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+ thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+ holder(identity, reclaimer);
+ thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
@@ -128,28 +128,28 @@ ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
#endif
#endif
-void ClearCurrentThreadIdentity() {
-#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
- ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
- thread_identity_ptr = nullptr;
-#elif ABSL_THREAD_IDENTITY_MODE == \
- ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
- // pthread_setspecific is expected to clear the value on destruction
- assert(CurrentThreadIdentityIfPresent() == nullptr);
-#endif
-}
-
-#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
-ThreadIdentity* CurrentThreadIdentityIfPresent() {
- bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
- if (!initialized) {
- return nullptr;
- }
- return reinterpret_cast<ThreadIdentity*>(
- pthread_getspecific(thread_identity_pthread_key));
-}
-#endif
-
-} // namespace base_internal
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+ thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ // pthread_setspecific is expected to clear the value on destruction
+ assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+ bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+ if (!initialized) {
+ return nullptr;
+ }
+ return reinterpret_cast<ThreadIdentity*>(
+ pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
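
The pthread_key path above relies on a release/acquire pairing: AllocateThreadIdentityKey() publishes the key with a release store, and CurrentThreadIdentityIfPresent() reads the flag with an acquire load before touching the key. A standalone sketch of that idiom (illustrative names, not from the patch):

    #include <pthread.h>

    #include <atomic>

    static pthread_key_t g_key;
    static std::atomic<bool> g_key_ready(false);

    // Run exactly once (e.g. via absl::call_once). The release store makes the
    // fully created key visible to any thread that later observes the flag.
    void InitKeyOnce() {
      pthread_key_create(&g_key, /*destructor=*/nullptr);
      g_key_ready.store(true, std::memory_order_release);
    }

    // Safe to call from any thread: the acquire load guarantees that if the
    // flag reads true, the key created in InitKeyOnce() is visible as well.
    void* CurrentValueIfPresent() {
      if (!g_key_ready.load(std::memory_order_acquire)) return nullptr;
      return pthread_getspecific(g_key);
    }
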
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
index 7eef2e0df8..659694b326 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/thread_identity.h
@@ -1,75 +1,75 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Each active thread has a ThreadIdentity that may represent the thread at
-// various interface levels. ThreadIdentity objects are never deallocated.
-// When a thread terminates, its ThreadIdentity object may be reused for a
-// thread created later.
-
-#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
-#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
-
-#ifndef _WIN32
-#include <pthread.h>
-// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
-// supported.
-#include <unistd.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread at
+// various interface levels. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
#include "absl/base/config.h"
-#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
-
-namespace absl {
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-struct SynchLocksHeld;
-struct SynchWaitParams;
-
-namespace base_internal {
-
-class SpinLock;
-struct ThreadIdentity;
-
-// Used by the implementation of absl::Mutex and absl::CondVar.
-struct PerThreadSynch {
- // The internal representation of absl::Mutex and absl::CondVar relies
- // on the alignment of PerThreadSynch. Both store the address of the
- // PerThreadSynch in the high-order bits of their internal state,
- // which means the low kLowZeroBits of the address of PerThreadSynch
- // must be zero.
- static constexpr int kLowZeroBits = 8;
- static constexpr int kAlignment = 1 << kLowZeroBits;
-
- // Returns the associated ThreadIdentity.
- // This can be implemented as a cast because we guarantee
- // PerThreadSynch is the first element of ThreadIdentity.
- ThreadIdentity* thread_identity() {
- return reinterpret_cast<ThreadIdentity*>(this);
- }
-
- PerThreadSynch *next; // Circular waiter queue; initialized to 0.
- PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
- // up to and including "skip" have same
- // condition as this, and will be woken later
- bool may_skip; // if false while on mutex queue, a mutex unlocker
- // is using this PerThreadSynch as a terminator. Its
- // skip field must not be filled in because the loop
- // might then skip over the terminator.
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+ // The internal representation of absl::Mutex and absl::CondVar relies
+ // on the alignment of PerThreadSynch. Both store the address of the
+ // PerThreadSynch in the high-order bits of their internal state,
+ // which means the low kLowZeroBits of the address of PerThreadSynch
+ // must be zero.
+ static constexpr int kLowZeroBits = 8;
+ static constexpr int kAlignment = 1 << kLowZeroBits;
+
+ // Returns the associated ThreadIdentity.
+ // This can be implemented as a cast because we guarantee
+ // PerThreadSynch is the first element of ThreadIdentity.
+ ThreadIdentity* thread_identity() {
+ return reinterpret_cast<ThreadIdentity*>(this);
+ }
+
+ PerThreadSynch *next; // Circular waiter queue; initialized to 0.
+ PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
+ // up to and including "skip" have same
+ // condition as this, and will be woken later
+ bool may_skip; // if false while on mutex queue, a mutex unlocker
+ // is using this PerThreadSynch as a terminator. Its
+ // skip field must not be filled in because the loop
+ // might then skip over the terminator.
bool wake; // This thread is to be woken from a Mutex.
// If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
// waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
@@ -92,24 +92,24 @@ struct PerThreadSynch {
// chances of debug logging information being
// output successfully.
int priority; // Priority of thread (updated every so often).
-
- // State values:
- // kAvailable: This PerThreadSynch is available.
- // kQueued: This PerThreadSynch is unavailable; it's currently queued on a
- // Mutex or CondVar waiter list.
- //
- // Transitions from kQueued to kAvailable require a release
- // barrier. This is needed as a waiter may use "state" to
- // independently observe that it's no longer queued.
- //
- // Transitions from kAvailable to kQueued require no barrier, they
- // are externally ordered by the Mutex.
- enum State {
- kAvailable,
- kQueued
- };
- std::atomic<State> state;
-
+
+ // State values:
+ // kAvailable: This PerThreadSynch is available.
+ // kQueued: This PerThreadSynch is unavailable; it's currently queued on a
+ // Mutex or CondVar waiter list.
+ //
+ // Transitions from kQueued to kAvailable require a release
+ // barrier. This is needed as a waiter may use "state" to
+ // independently observe that it's no longer queued.
+ //
+ // Transitions from kAvailable to kQueued require no barrier, they
+ // are externally ordered by the Mutex.
+ enum State {
+ kAvailable,
+ kQueued
+ };
+ std::atomic<State> state;
+
// The wait parameters of the current wait. waitp is null if the
// thread is not waiting. Transitions from null to non-null must
// occur before the enqueue commit point (state = kQueued in
@@ -121,112 +121,112 @@ struct PerThreadSynch {
// but with an identical SynchWaitParams pointer, thus leaving the
// pointer unchanged.
SynchWaitParams* waitp;
-
+
intptr_t readers; // Number of readers in mutex.
-
+
// When priority will next be read (cycles).
int64_t next_priority_read_cycles;
-
- // Locks held; used during deadlock detection.
- // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
- SynchLocksHeld *all_locks;
-};
-
+
+ // Locks held; used during deadlock detection.
+ // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+ SynchLocksHeld *all_locks;
+};
+
// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
-struct ThreadIdentity {
- // Must be the first member. The Mutex implementation requires that
- // the PerThreadSynch object associated with each thread is
- // PerThreadSynch::kAlignment aligned. We provide this alignment on
- // ThreadIdentity itself.
- PerThreadSynch per_thread_synch;
-
- // Private: Reserved for absl::synchronization_internal::Waiter.
- struct WaiterState {
+struct ThreadIdentity {
+ // Must be the first member. The Mutex implementation requires that
+ // the PerThreadSynch object associated with each thread is
+ // PerThreadSynch::kAlignment aligned. We provide this alignment on
+ // ThreadIdentity itself.
+ PerThreadSynch per_thread_synch;
+
+ // Private: Reserved for absl::synchronization_internal::Waiter.
+ struct WaiterState {
alignas(void*) char data[128];
- } waiter_state;
-
- // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
- std::atomic<int>* blocked_count_ptr;
-
- // The following variables are mostly read/written just by the
- // thread itself. The only exception is that these are read by
- // a ticker thread as a hint.
- std::atomic<int> ticker; // Tick counter, incremented once per second.
- std::atomic<int> wait_start; // Ticker value when thread started waiting.
- std::atomic<bool> is_idle; // Has thread become idle yet?
-
- ThreadIdentity* next;
-};
-
-// Returns the ThreadIdentity object representing the calling thread; it is
-// guaranteed to be unique for the thread's lifetime. The returned object will
-// remain valid for the program's lifetime, although it may be re-assigned to a
-// subsequent thread. If one does not exist, returns nullptr instead.
-//
-// Does not malloc(*), and is async-signal safe.
-// [*] Technically pthread_setspecific() does malloc on first use; however this
-// is handled internally within tcmalloc's initialization already.
-//
-// New ThreadIdentity objects can be constructed and associated with a thread
-// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
-ThreadIdentity* CurrentThreadIdentityIfPresent();
-
-using ThreadIdentityReclaimerFunction = void (*)(void*);
-
-// Sets the current thread identity to the given value. 'reclaimer' is a
-// pointer to the global function for cleaning up instances on thread
-// destruction.
-void SetCurrentThreadIdentity(ThreadIdentity* identity,
- ThreadIdentityReclaimerFunction reclaimer);
-
-// Removes the currently associated ThreadIdentity from the running thread.
-// This must be called from inside the ThreadIdentityReclaimerFunction, and only
-// from that function.
-void ClearCurrentThreadIdentity();
-
-// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
-// index>
-#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ } waiter_state;
+
+ // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+ std::atomic<int>* blocked_count_ptr;
+
+ // The following variables are mostly read/written just by the
+ // thread itself. The only exception is that these are read by
+ // a ticker thread as a hint.
+ std::atomic<int> ticker; // Tick counter, incremented once per second.
+ std::atomic<int> wait_start; // Ticker value when thread started waiting.
+ std::atomic<bool> is_idle; // Has thread become idle yet?
+
+ ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; it is
+// guaranteed to be unique for the thread's lifetime. The returned object will
+// remain valid for the program's lifetime, although it may be re-assigned to a
+// subsequent thread. If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value. 'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+ ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
-#else
-#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
-#endif
-
-#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
-#else
-#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
-#endif
-
-#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
-#else
-#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
-#endif
-
-#ifdef ABSL_THREAD_IDENTITY_MODE
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
-#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
-#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
-#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
- (__GOOGLE_GRTE_VERSION__ >= 20140228L)
-// Support for async-safe TLS was specifically added in GRTEv4. It's not
-// present in the upstream eglibc.
-// Note: Current default for production systems.
-#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
-#else
-#define ABSL_THREAD_IDENTITY_MODE \
- ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
-#endif
-
-#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
- ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-
+ (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4. It's not
+// present in the upstream eglibc.
+// Note: Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
#if ABSL_PER_THREAD_TLS
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
thread_identity_ptr;
@@ -235,7 +235,7 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#else
#error Thread-local storage not detected on this platform
#endif
-
+
// thread_local variables cannot be in headers exposed by DLLs or in certain
// build configurations on Apple platforms. However, it is important for
// performance reasons in general that `CurrentThreadIdentityIfPresent` be
@@ -248,18 +248,18 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#endif
#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
-inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
- return thread_identity_ptr;
-}
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+ return thread_identity_ptr;
+}
+#endif
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
#endif
-
-#elif ABSL_THREAD_IDENTITY_MODE != \
- ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
-#error Unknown ABSL_THREAD_IDENTITY_MODE
-#endif
-
-} // namespace base_internal
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
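
PerThreadSynch::kLowZeroBits exists so Mutex and CondVar can keep the PerThreadSynch address in the high-order bits of one word and stash state flags in the low bits. A sketch of that pointer-packing trick under the stated alignment guarantee (helper names here are illustrative, not from the patch):

    #include <cstdint>

    constexpr int kLowZeroBits = 8;  // as in PerThreadSynch
    constexpr uintptr_t kFlagMask = (uintptr_t{1} << kLowZeroBits) - 1;

    // With every PerThreadSynch aligned to 1 << kLowZeroBits, the low 8 bits
    // of its address are zero, so OR-ing small flags into them is lossless.
    uintptr_t Pack(void* synch, uintptr_t flags) {
      return reinterpret_cast<uintptr_t>(synch) | (flags & kFlagMask);
    }

    void* UnpackPointer(uintptr_t word) {
      return reinterpret_cast<void*>(word & ~kFlagMask);
    }

    uintptr_t UnpackFlags(uintptr_t word) { return word & kFlagMask; }
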
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
index 422bf199d1..c260ff1eed 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.cc
@@ -1,196 +1,196 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/throw_delegate.h"
-
-#include <cstdlib>
-#include <functional>
-#include <new>
-#include <stdexcept>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
+namespace base_internal {
+
// NOTE: The various STL exception throwing functions are placed within the
// #ifdef blocks so the symbols aren't exposed on platforms that don't support
// them, such as the Android NDK. For example, ANGLE fails to link when building
// within AOSP without them, since the STL functions don't exist.
-namespace {
+namespace {
#ifdef ABSL_HAVE_EXCEPTIONS
-template <typename T>
-[[noreturn]] void Throw(const T& error) {
- throw error;
+template <typename T>
+[[noreturn]] void Throw(const T& error) {
+ throw error;
}
-#endif
-} // namespace
-
-void ThrowStdLogicError(const std::string& what_arg) {
+#endif
+} // namespace
+
+void ThrowStdLogicError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ Throw(std::logic_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdLogicError(const char* what_arg) {
+}
+void ThrowStdLogicError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ Throw(std::logic_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-void ThrowStdInvalidArgument(const std::string& what_arg) {
+}
+void ThrowStdInvalidArgument(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ Throw(std::invalid_argument(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdInvalidArgument(const char* what_arg) {
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ Throw(std::invalid_argument(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdDomainError(const std::string& what_arg) {
+}
+
+void ThrowStdDomainError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ Throw(std::domain_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdDomainError(const char* what_arg) {
+}
+void ThrowStdDomainError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ Throw(std::domain_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdLengthError(const std::string& what_arg) {
+}
+
+void ThrowStdLengthError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ Throw(std::length_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdLengthError(const char* what_arg) {
+}
+void ThrowStdLengthError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ Throw(std::length_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdOutOfRange(const std::string& what_arg) {
+}
+
+void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ Throw(std::out_of_range(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdOutOfRange(const char* what_arg) {
+}
+void ThrowStdOutOfRange(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ Throw(std::out_of_range(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdRuntimeError(const std::string& what_arg) {
+}
+
+void ThrowStdRuntimeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ Throw(std::runtime_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdRuntimeError(const char* what_arg) {
+}
+void ThrowStdRuntimeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ Throw(std::runtime_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdRangeError(const std::string& what_arg) {
+}
+
+void ThrowStdRangeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ Throw(std::range_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdRangeError(const char* what_arg) {
+}
+void ThrowStdRangeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ Throw(std::range_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdOverflowError(const std::string& what_arg) {
+}
+
+void ThrowStdOverflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ Throw(std::overflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdOverflowError(const char* what_arg) {
+}
+void ThrowStdOverflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ Throw(std::overflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
-void ThrowStdUnderflowError(const std::string& what_arg) {
+}
+
+void ThrowStdUnderflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ Throw(std::underflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
-}
-void ThrowStdUnderflowError(const char* what_arg) {
+}
+void ThrowStdUnderflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ Throw(std::underflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
-}
-
+}
+
void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_function_call());
@@ -198,7 +198,7 @@ void ThrowStdBadFunctionCall() {
std::abort();
#endif
}
-
+
void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_alloc());
@@ -206,7 +206,7 @@ void ThrowStdBadAlloc() {
std::abort();
#endif
}
-
-} // namespace base_internal
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.h b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.h
index aa0d04b8c7..075f527254 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate.h
@@ -1,75 +1,75 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
-#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
-
-#include <string>
-
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// Helper functions that allow throwing exceptions consistently from anywhere.
-// The main use case is for header-based libraries (e.g., templates), as they will
-// be built by many different targets with their own compiler options.
-// In particular, this will allow a safe way to throw exceptions even if the
-// caller is compiled with -fno-exceptions. This is intended for implementing
-// things like map<>::at(), which the standard documents as throwing an
-// exception on error.
-//
-// Using other techniques like #if tricks could lead to ODR violations.
-//
-// You shouldn't use it unless you're writing code that you know will be built
-// both with and without exceptions and you need to conform to an interface
-// that uses exceptions.
-
-[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
-[[noreturn]] void ThrowStdLogicError(const char* what_arg);
-[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
-[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
-[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
-[[noreturn]] void ThrowStdDomainError(const char* what_arg);
-[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
-[[noreturn]] void ThrowStdLengthError(const char* what_arg);
-[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
-[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
-[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
-[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
-[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
-[[noreturn]] void ThrowStdRangeError(const char* what_arg);
-[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
-[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
-[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
-[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
-
-[[noreturn]] void ThrowStdBadFunctionCall();
-[[noreturn]] void ThrowStdBadAlloc();
-
-// ThrowStdBadArrayNewLength() cannot be consistently supported because
-// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
-// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
-// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
-// libcxx (as of 3.2) and msvc (as of 2015) both have it.
-// [[noreturn]] void ThrowStdBadArrayNewLength();
-
-} // namespace base_internal
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g., templates), as they will
+// be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions. This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
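
The header cites map<>::at() as the motivating case. A sketch of that intended usage pattern, with a hypothetical container name (not part of the patch): the out-of-line delegate lets at() behave consistently whether or not the caller is built with exceptions, throwing std::out_of_range when they are enabled and logging fatally and aborting when they are not.

    #include <cstddef>

    #include "absl/base/internal/throw_delegate.h"

    // Hypothetical header-only container using the delegate for bounds checks.
    template <typename T, std::size_t N>
    class TinyArray {
     public:
      T& at(std::size_t i) {
        if (i >= N) {
          absl::base_internal::ThrowStdOutOfRange("TinyArray::at");
        }
        return data_[i];
      }

     private:
      T data_[N];
    };
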
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate/ya.make b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate/ya.make
index fd3f6b6ba5..3df680adf0 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate/ya.make
@@ -1,34 +1,34 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
-
-SRCS(
- throw_delegate.cc
-)
-
-END()
+SRCDIR(contrib/restricted/abseil-cpp/absl/base/internal)
+
+SRCS(
+ throw_delegate.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/tsan_mutex_interface.h b/contrib/restricted/abseil-cpp/absl/base/internal/tsan_mutex_interface.h
index fe6fa418d4..39207d8a5c 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/tsan_mutex_interface.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/tsan_mutex_interface.h
@@ -1,68 +1,68 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file is intended solely for spinlock.h.
-// It provides ThreadSanitizer annotations for custom mutexes.
-// See <sanitizer/tsan_interface.h> for meaning of these annotations.
-
-#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
-#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
#include "absl/base/config.h"
-// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-// Macro intended only for internal use.
-//
-// Checks whether LLVM Thread Sanitizer interfaces are available.
-// First made available in LLVM 5.0 (Sep 2017).
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
-#endif
-
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
-#if __has_include(<sanitizer/tsan_interface.h>)
-#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
-#endif
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-#include <sanitizer/tsan_interface.h>
-
-#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
-#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
-#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
-#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
-#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
-#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
-#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
-#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
-#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
-#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
-
-#else
-
-#define ABSL_TSAN_MUTEX_CREATE(...)
-#define ABSL_TSAN_MUTEX_DESTROY(...)
-#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
-#define ABSL_TSAN_MUTEX_POST_LOCK(...)
-#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
-#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
-#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
-#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
-#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
-#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
-
-#endif
-
-#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
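
The header is intended solely for spinlock.h, but the shape of its use is worth seeing: each lifecycle event of a custom lock is reported to TSan so the race detector can model it like a standard mutex, and the macros compile away to nothing when the TSan interface is unavailable. An illustrative sketch (not the actual SpinLock):

    #include <atomic>

    #include "absl/base/internal/tsan_mutex_interface.h"

    class TinySpinLock {
     public:
      TinySpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
      ~TinySpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

      void Lock() {
        ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
        while (locked_.exchange(true, std::memory_order_acquire)) {
        }
        ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);  // flags, recursion depth
      }

      void Unlock() {
        ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
        locked_.store(false, std::memory_order_release);
        ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
      }

     private:
      std::atomic<bool> locked_{false};
    };
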
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h b/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
index e74c9ddd32..093dd9b499 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unaligned_access.h
@@ -1,82 +1,82 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
-#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
-
-#include <string.h>
-
-#include <cstdint>
-
-#include "absl/base/attributes.h"
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
-
-// unaligned APIs
-
-// Portable handling of unaligned loads, stores, and copies.
-
-// The unaligned API is C++ only. The declarations use C++ features
-// (namespaces, inline) which are absent or incompatible in C.
-#if defined(__cplusplus)
-namespace absl {
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only. The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-inline uint16_t UnalignedLoad16(const void *p) {
- uint16_t t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint32_t UnalignedLoad32(const void *p) {
- uint32_t t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint64_t UnalignedLoad64(const void *p) {
- uint64_t t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
-
-inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
-
-inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
-
-} // namespace base_internal
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+ uint16_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+ uint32_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+ uint64_t t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
- (absl::base_internal::UnalignedLoad16(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
- (absl::base_internal::UnalignedLoad32(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
- (absl::base_internal::UnalignedLoad64(_p))
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
- (absl::base_internal::UnalignedStore16(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
- (absl::base_internal::UnalignedStore32(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
- (absl::base_internal::UnalignedStore64(_p, _val))
-
-#endif // defined(__cplusplus), end of unaligned API
-
-#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+} // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+ (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+ (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+ (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+ (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+ (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+ (absl::base_internal::UnalignedStore64(_p, _val))
+
+#endif // defined(__cplusplus), end of unaligned API
+
+#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
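The memcpy-based helpers restored above are the portable way to express an unaligned access without undefined behavior: a direct `*reinterpret_cast<uint32_t*>` at a misaligned address would violate alignment and aliasing rules, whereas memcpy of `sizeof t` bytes is always well-defined and compilers lower it to a single load/store where the hardware allows. A minimal usage sketch follows; the buffer, offset, and stored value are illustrative, and note this header is Abseil-internal:

    // Sketch: store and load a 32-bit value at a deliberately misaligned address.
    #include <cstdint>
    #include <cstdio>
    #include "absl/base/internal/unaligned_access.h"

    int main() {
      unsigned char buf[8] = {0};
      // buf + 1 is not 4-byte aligned; the memcpy-based implementation keeps
      // this well-defined where a plain pointer cast would not be.
      ABSL_INTERNAL_UNALIGNED_STORE32(buf + 1, 0xDEADBEEFu);
      uint32_t v = ABSL_INTERNAL_UNALIGNED_LOAD32(buf + 1);
      std::printf("%08x\n", static_cast<unsigned>(v));  // prints deadbeef
      return 0;
    }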
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
index 8ca19a6106..4d352bd110 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.cc
@@ -1,69 +1,69 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/unscaledcycleclock.h"
-
-#if ABSL_USE_UNSCALED_CYCLECLOCK
-
-#if defined(_WIN32)
-#include <intrin.h>
-#endif
-
-#if defined(__powerpc__) || defined(__ppc__)
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
#ifdef __GLIBC__
#include <sys/platform/ppc.h>
#elif defined(__FreeBSD__)
#include <sys/sysctl.h>
#include <sys/types.h>
-#endif
#endif
-
-#include "absl/base/internal/sysinfo.h"
-
-namespace absl {
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-#if defined(__i386__)
-
-int64_t UnscaledCycleClock::Now() {
- int64_t ret;
- __asm__ volatile("rdtsc" : "=A"(ret));
- return ret;
-}
-
-double UnscaledCycleClock::Frequency() {
- return base_internal::NominalCPUFrequency();
-}
-
-#elif defined(__x86_64__)
-
-int64_t UnscaledCycleClock::Now() {
- uint64_t low, high;
- __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- return (high << 32) | low;
-}
-
-double UnscaledCycleClock::Frequency() {
- return base_internal::NominalCPUFrequency();
-}
-
-#elif defined(__powerpc__) || defined(__ppc__)
-
-int64_t UnscaledCycleClock::Now() {
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+ int64_t ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+int64_t UnscaledCycleClock::Now() {
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+}
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
#ifdef __GLIBC__
- return __ppc_get_timebase();
+ return __ppc_get_timebase();
#else
#ifdef __powerpc64__
int64_t tbr;
@@ -82,11 +82,11 @@ int64_t UnscaledCycleClock::Now() {
return (static_cast<int64_t>(tbu) << 32) | tbl;
#endif
#endif
-}
-
-double UnscaledCycleClock::Frequency() {
+}
+
+double UnscaledCycleClock::Frequency() {
#ifdef __GLIBC__
- return __ppc_get_timebase_freq();
+ return __ppc_get_timebase_freq();
#elif defined(_AIX)
// This is the same constant value as returned by
// __ppc_get_timebase_freq().
@@ -103,26 +103,26 @@ double UnscaledCycleClock::Frequency() {
#else
#error Must implement UnscaledCycleClock::Frequency()
#endif
-}
-
-#elif defined(__aarch64__)
-
-// The system timer of ARMv8 runs at a different frequency than the CPU's.
-// The frequency is fixed, typically in the range 1-50 MHz. It can be
-// read from the CNTFRQ special register. We assume the OS has set up
-// the virtual timer properly.
-int64_t UnscaledCycleClock::Now() {
- int64_t virtual_timer_value;
- asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
- return virtual_timer_value;
-}
-
-double UnscaledCycleClock::Frequency() {
- uint64_t aarch64_timer_frequency;
- asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
- return aarch64_timer_frequency;
-}
-
+}
+
+#elif defined(__aarch64__)
+
+// The system timer of ARMv8 runs at a different frequency than the CPU's.
+// The frequency is fixed, typically in the range 1-50 MHz. It can be
+// read from the CNTFRQ special register. We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+ int64_t virtual_timer_value;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+ uint64_t aarch64_timer_frequency;
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+ return aarch64_timer_frequency;
+}
+
#elif defined(__riscv)
int64_t UnscaledCycleClock::Now() {
@@ -135,20 +135,20 @@ double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
-#elif defined(_M_IX86) || defined(_M_X64)
-
-#pragma intrinsic(__rdtsc)
-
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
-
-double UnscaledCycleClock::Frequency() {
- return base_internal::NominalCPUFrequency();
-}
-
-#endif
-
-} // namespace base_internal
+
+double UnscaledCycleClock::Frequency() {
+ return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+} // namespace absl
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
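Every per-architecture branch above reduces to the same contract: Now() returns raw ticks and Frequency() returns ticks per second, so elapsed seconds are ticks / Frequency(). Because UnscaledCycleClock is restricted to its friend classes (see the header below), ordinary code reaches these readings through the scaled absl::base_internal::CycleClock wrapper. A sketch of that conversion, with an illustrative timed loop:

    // Sketch: convert cycle-clock ticks to seconds via ticks / Frequency().
    #include <cstdint>
    #include <cstdio>
    #include "absl/base/internal/cycleclock.h"

    int main() {
      using absl::base_internal::CycleClock;
      const int64_t start = CycleClock::Now();
      volatile double sink = 0;
      for (int i = 0; i < 1000000; ++i) sink += i;  // work being timed
      const int64_t ticks = CycleClock::Now() - start;
      // Frequency() reports how much Now() advances per second.
      std::printf("%.6f s\n", static_cast<double>(ticks) / CycleClock::Frequency());
      return 0;
    }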
diff --git a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
index cb2c4485b6..681ff8f996 100644
--- a/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
+++ b/contrib/restricted/abseil-cpp/absl/base/internal/unscaledcycleclock.h
@@ -1,124 +1,124 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// UnscaledCycleClock
-// An UnscaledCycleClock yields the value and frequency of a cycle counter
-// that increments at a rate that is approximately constant.
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+// An UnscaledCycleClock yields the value and frequency of a cycle counter
+// that increments at a rate that is approximately constant.
// This class is for internal use only; consider using CycleClock
// instead.
-//
-// Notes:
-// The cycle counter frequency is not necessarily the core clock frequency.
-// That is, CycleCounter cycles are not necessarily "CPU cycles".
-//
-// An arbitrary offset may have been added to the counter at power on.
-//
-// On some platforms, the rate and offset of the counter may differ
-// slightly when read from different CPUs of a multiprocessor. Usually,
-// we try to ensure that the operating system adjusts values periodically
-// so that values agree approximately. If you need stronger guarantees,
-// consider using alternate interfaces.
-//
-// The CPU is not required to maintain the ordering of a cycle counter read
-// with respect to surrounding instructions.
-
-#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
-#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
-
-#include <cstdint>
-
-#if defined(__APPLE__)
-#include <TargetConditionals.h>
-#endif
-
-#include "absl/base/port.h"
-
-// The following platforms have an implementation of a hardware counter.
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
defined(_M_IX86) || defined(_M_X64)
-#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
-#else
-#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
-#endif
-
-// The following platforms often disable access to the hardware
-// counter (through a sandbox) even if the underlying hardware has a
-// usable counter. The CycleTimer interface also requires a *scaled*
-// CycleClock that runs at at least 1 MHz. We've found some Android
-// ARM64 devices where this is not the case, so we disable it by
-// default on Android ARM64.
-#if defined(__native_client__) || \
- (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
- (defined(__ANDROID__) && defined(__aarch64__))
-#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
-#else
-#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
-#endif
-
-// UnscaledCycleClock is an optional internal feature.
-// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
-// Can be overridden at compile time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
-#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
-#define ABSL_USE_UNSCALED_CYCLECLOCK \
- (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
- ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
-#endif
-
-#if ABSL_USE_UNSCALED_CYCLECLOCK
-
-// This macro can be used to test if UnscaledCycleClock::Frequency()
-// is NominalCPUFrequency() on a particular platform.
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || \
+ (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+ (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK \
+ (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+ ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
defined(_M_IX86) || defined(_M_X64))
-#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-#endif
-
-namespace absl {
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace time_internal {
-class UnscaledCycleClockWrapperForGetCurrentTime;
-} // namespace time_internal
-
-namespace base_internal {
-class CycleClock;
-class UnscaledCycleClockWrapperForInitializeFrequency;
-
-class UnscaledCycleClock {
- private:
- UnscaledCycleClock() = delete;
-
- // Return the value of a cycle counter that counts at a rate that is
- // approximately constant.
- static int64_t Now();
-
- // Return how much UnscaledCycleClock::Now() increases per second.
- // This is not necessarily the core CPU clock frequency.
- // It may be the nominal value reported by the kernel, rather than a
- // measured value.
- static double Frequency();
-
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+} // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+ UnscaledCycleClock() = delete;
+
+ // Return the value of a cycle counter that counts at a rate that is
+ // approximately constant.
+ static int64_t Now();
+
+ // Return how much UnscaledCycleClock::Now() increases per second.
+ // This is not necessarily the core CPU clock frequency.
+ // It may be the nominal value reported by the kernel, rather than a
+ // measured value.
+ static double Frequency();
+
// Allowed users
- friend class base_internal::CycleClock;
- friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
- friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
-};
-
-} // namespace base_internal
+ friend class base_internal::CycleClock;
+ friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+ friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+} // namespace base_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_USE_UNSCALED_CYCLECLOCK
-
-#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+} // namespace absl
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
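The macro logic restored in this header resolves to: ABSL_USE_UNSCALED_CYCLECLOCK = (an implementation exists for this architecture) && (the platform default allows it), unless overridden on the command line with -DABSL_USE_UNSCALED_CYCLECLOCK=0|1. A minimal sketch of the documented feature test; the printed strings are illustrative:

    // Sketch: compile-time dispatch on the feature-test macros defined above.
    #include <cstdio>
    #include "absl/base/internal/unscaledcycleclock.h"

    int main() {
    #if ABSL_USE_UNSCALED_CYCLECLOCK
      std::puts("unscaled cycle clock: enabled");
    #ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
      // On x86 and RISC-V, Frequency() reports NominalCPUFrequency().
      std::puts("Frequency() is the nominal CPU frequency");
    #endif
    #else
      // Platform default is off (e.g. NaCl, iPhone, Android ARM64) or the
      // macro was forced to 0 at build time.
      std::puts("unscaled cycle clock: disabled");
    #endif
      return 0;
    }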