author     heretic <heretic@yandex-team.ru>  2022-02-10 16:45:43 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:43 +0300
commit     397cbe258b9e064f49c4ca575279f02f39fef76e (patch)
tree       a0b0eb3cca6a14e4e8ea715393637672fa651284 /contrib/restricted/abseil-cpp-tstring/y_absl/base/internal
parent     43f5a35593ebc9f6bcea619bb170394ea7ae468e (diff)
download   ydb-397cbe258b9e064f49c4ca575279f02f39fef76e.tar.gz
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp-tstring/y_absl/base/internal')
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h | 796
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h | 122
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h | 96
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h | 62
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc | 70
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h | 34
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc | 72
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h | 66
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc | 22
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc | 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h | 22
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc | 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc | 176
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h | 78
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc | 56
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc | 18
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h | 108
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc | 228
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make | 12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc | 2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h | 6
30 files changed, 1075 insertions, 1075 deletions
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
index 82be9f94ab..3f8a1eb8de 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/direct_mmap.h
@@ -61,10 +61,10 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
#endif
#endif // __BIONIC__
-#if defined(__NR_mmap2) && !defined(SYS_mmap2)
-#define SYS_mmap2 __NR_mmap2
-#endif
-
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
namespace y_absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
@@ -74,13 +74,13 @@ namespace base_internal {
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off64_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
- defined(__m68k__) || defined(__sh__) || \
- (defined(__hppa__) && !defined(__LP64__)) || \
+ defined(__m68k__) || defined(__sh__) || \
+ (defined(__hppa__) && !defined(__LP64__)) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
(defined(__PPC__) && !defined(__PPC64__)) || \
- (defined(__riscv) && __riscv_xlen == 32) || \
- (defined(__s390__) && !defined(__s390x__)) || \
- (defined(__sparc__) && !defined(__arch64__))
+ (defined(__riscv) && __riscv_xlen == 32) || \
+ (defined(__s390__) && !defined(__s390x__)) || \
+ (defined(__sparc__) && !defined(__arch64__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0) {
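The hunk above ends just before the mmap2 call itself. As orientation only, here is a minimal sketch of the general mmap-via-mmap2 pattern on these 32-bit targets; it is not the file's exact continuation, the name `Mmap2Sketch` is illustrative, and it assumes a glibc-style environment where `SYS_mmap2`, `off64_t`, and `getpagesize()` are available.

```cpp
#include <errno.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

// mmap2 takes its file offset in pages rather than bytes, so the byte offset
// must be page-aligned and scaled down before making the syscall.
static void* Mmap2Sketch(void* start, size_t length, int prot, int flags,
                         int fd, off64_t offset) {
  static int pagesize = 0;
  if (pagesize == 0) {
    pagesize = getpagesize();
  }
  if (offset < 0 || offset % pagesize != 0) {
    errno = EINVAL;
    return MAP_FAILED;
  }
  return reinterpret_cast<void*>(
      syscall(SYS_mmap2, start, length, prot, flags, fd,
              static_cast<off_t>(offset / pagesize)));
}
```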
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h
index 75f0022683..56c5963e0e 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/dynamic_annotations.h
@@ -1,398 +1,398 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file defines dynamic annotations for use with dynamic analysis tool
-// such as valgrind, PIN, etc.
-//
-// Dynamic annotation is a source code annotation that affects the generated
-// code (that is, the annotation is not a comment). Each such annotation is
-// attached to a particular instruction and/or to a particular object (address)
-// in the program.
-//
-// The annotations that should be used by users are macros in all upper-case
-// (e.g., ANNOTATE_THREAD_NAME).
-//
-// Actual implementation of these macros may differ depending on the dynamic
-// analysis tool being used.
-//
-// This file supports the following configurations:
-// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
-// In this case, macros expand to functions implemented by Thread Sanitizer,
-// when building with TSan. When not provided an external implementation,
-// dynamic_annotations.cc provides no-op implementations.
-//
-// - Static Clang thread-safety warnings enabled.
-// When building with a Clang compiler that supports thread-safety warnings,
-// a subset of annotations can be statically-checked at compile-time. We
-// expand these macros to static-inline functions that can be analyzed for
-// thread-safety, but afterwards elided when building the final binary.
-//
-// - All annotations are disabled.
-// If neither Dynamic Annotations nor Clang thread-safety warnings are
-// enabled, then all annotation-macros expand to empty.
-
-#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
-#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
-
-#include <stddef.h>
-
-#include "y_absl/base/config.h"
-
-// -------------------------------------------------------------------------
-// Decide which features are enabled
-
-#ifndef DYNAMIC_ANNOTATIONS_ENABLED
-#define DYNAMIC_ANNOTATIONS_ENABLED 0
-#endif
-
-#if defined(__clang__) && !defined(SWIG)
-#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
-#endif
-
-#if DYNAMIC_ANNOTATIONS_ENABLED != 0
-
-#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
-#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
-#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
-#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
-#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
-
-#else
-
-#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
-#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
-#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
-
-// Clang provides limited support for static thread-safety analysis through a
-// feature called Annotalysis. We configure macro-definitions according to
-// whether Annotalysis support is available. When running in opt-mode, GCC
-// will issue a warning, if these attributes are compiled. Only include them
-// when compiling using Clang.
-
-// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
-#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
- defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
-// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
-#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
- ABSL_INTERNAL_ANNOTALYSIS_ENABLED
-#endif
-
-// Memory annotations are also made available to LLVM's Memory Sanitizer
-#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
-#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
-#endif
-
-#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
-#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
-#endif
-
-#ifdef __cplusplus
-#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
-#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
-#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
-#define ABSL_INTERNAL_STATIC_INLINE inline
-#else
-#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
-#define ABSL_INTERNAL_END_EXTERN_C // empty
-#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
-#define ABSL_INTERNAL_STATIC_INLINE static inline
-#endif
-
-// -------------------------------------------------------------------------
-// Define race annotations.
-
-#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
-
-// -------------------------------------------------------------
-// Annotations that suppress errors. It is usually better to express the
-// program's synchronization using the other annotations, but these can be used
-// when all else fails.
-
-// Report that we may have a benign race at `pointer`, with size
-// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
-// point where `pointer` has been allocated, preferably close to the point
-// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
-#define ANNOTATE_BENIGN_RACE(pointer, description) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
- (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
-
-// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
-// the memory range [`address`, `address`+`size`).
-#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
- (__FILE__, __LINE__, address, size, description)
-
-// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
-// This annotation could be useful if you want to skip expensive race analysis
-// during some period of program execution, e.g. during initialization.
-#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
- (__FILE__, __LINE__, enable)
-
-// -------------------------------------------------------------
-// Annotations useful for debugging.
-
-// Report the current thread `name` to a race detector.
-#define ANNOTATE_THREAD_NAME(name) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
-
-// -------------------------------------------------------------
-// Annotations useful when implementing locks. They are not normally needed by
-// modules that merely use locks. The `lock` argument is a pointer to the lock
-// object.
-
-// Report that a lock has been created at address `lock`.
-#define ANNOTATE_RWLOCK_CREATE(lock) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
-
-// Report that a linker initialized lock has been created at address `lock`.
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
- (__FILE__, __LINE__, lock)
-#else
-#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
-#endif
-
-// Report that the lock at address `lock` is about to be destroyed.
-#define ANNOTATE_RWLOCK_DESTROY(lock) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
-
-// Report that the lock at address `lock` has been acquired.
-// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
-#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
- (__FILE__, __LINE__, lock, is_w)
-
-// Report that the lock at address `lock` is about to be released.
-// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
-#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
- (__FILE__, __LINE__, lock, is_w)
-
-// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
-#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
- namespace { \
- class static_var##_annotator { \
- public: \
- static_var##_annotator() { \
- ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
- #static_var ": " description); \
- } \
- }; \
- static static_var##_annotator the##static_var##_annotator; \
- } // namespace
-
-#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
-
-#define ANNOTATE_RWLOCK_CREATE(lock) // empty
-#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
-#define ANNOTATE_RWLOCK_DESTROY(lock) // empty
-#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
-#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
-#define ANNOTATE_BENIGN_RACE(address, description) // empty
-#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
-#define ANNOTATE_THREAD_NAME(name) // empty
-#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
-#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
-
-#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
-
-// -------------------------------------------------------------------------
-// Define memory annotations.
-
-#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
-
-#include <sanitizer/msan_interface.h>
-
-#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
- __msan_unpoison(address, size)
-
-#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
- __msan_allocated_memory(address, size)
-
-#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
-
-#if DYNAMIC_ANNOTATIONS_ENABLED == 1
-#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
- do { \
- (void)(address); \
- (void)(size); \
- } while (0)
-#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
- do { \
- (void)(address); \
- (void)(size); \
- } while (0)
-#else
-#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
-#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
-#endif
-
-#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
-
-// -------------------------------------------------------------------------
-// Define IGNORE_READS_BEGIN/_END attributes.
-
-#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
-
-#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
- __attribute((exclusive_lock_function("*")))
-#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
- __attribute((unlock_function("*")))
-
-#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
-
-#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
-#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
-
-#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
-
-// -------------------------------------------------------------------------
-// Define IGNORE_READS_BEGIN/_END annotations.
-
-#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
-
-// Request the analysis tool to ignore all reads in the current thread until
-// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
-// reads, while still checking other reads and all writes.
-// See also ANNOTATE_UNPROTECTED_READ.
-#define ANNOTATE_IGNORE_READS_BEGIN() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
-
-// Stop ignoring reads.
-#define ANNOTATE_IGNORE_READS_END() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
-
-#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
-
-// When Annotalysis is enabled without Dynamic Annotations, the use of
-// static-inline functions allows the annotations to be read at compile-time,
-// while still letting the compiler elide the functions from the final build.
-//
-// TODO(delesley) -- The exclusive lock here ignores writes as well, but
-// allows IGNORE_READS_AND_WRITES to work properly.
-
-#define ANNOTATE_IGNORE_READS_BEGIN() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
-
-#define ANNOTATE_IGNORE_READS_END() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
-
-#else
-
-#define ANNOTATE_IGNORE_READS_BEGIN() // empty
-#define ANNOTATE_IGNORE_READS_END() // empty
-
-#endif
-
-// -------------------------------------------------------------------------
-// Define IGNORE_WRITES_BEGIN/_END annotations.
-
-#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
-
-// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
-#define ANNOTATE_IGNORE_WRITES_BEGIN() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
-
-// Stop ignoring writes.
-#define ANNOTATE_IGNORE_WRITES_END() \
- ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
-
-#else
-
-#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty
-#define ANNOTATE_IGNORE_WRITES_END() // empty
-
-#endif
-
-// -------------------------------------------------------------------------
-// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
-// primitive annotations defined above.
-//
-// Instead of doing
-// ANNOTATE_IGNORE_READS_BEGIN();
-// ... = x;
-// ANNOTATE_IGNORE_READS_END();
-// one can use
-// ... = ANNOTATE_UNPROTECTED_READ(x);
-
-#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
-
-// Start ignoring all memory accesses (both reads and writes).
-#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
- do { \
- ANNOTATE_IGNORE_READS_BEGIN(); \
- ANNOTATE_IGNORE_WRITES_BEGIN(); \
- } while (0)
-
-// Stop ignoring both reads and writes.
-#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
- do { \
- ANNOTATE_IGNORE_WRITES_END(); \
- ANNOTATE_IGNORE_READS_END(); \
- } while (0)
-
-#ifdef __cplusplus
-// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
-#define ANNOTATE_UNPROTECTED_READ(x) \
- y_absl::base_internal::AnnotateUnprotectedRead(x)
-
-#endif
-
-#else
-
-#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
-#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
-#define ANNOTATE_UNPROTECTED_READ(x) (x)
-
-#endif
-
-// -------------------------------------------------------------------------
-// Address sanitizer annotations
-
-#ifdef ABSL_HAVE_ADDRESS_SANITIZER
-// Describe the current state of a contiguous container such as e.g.
-// std::vector or TString. For more details see
-// sanitizer/common_interface_defs.h, which is provided by the compiler.
-#include <sanitizer/common_interface_defs.h>
-
-#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
- __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
-#define ADDRESS_SANITIZER_REDZONE(name) \
- struct { \
- char x[8] __attribute__((aligned(8))); \
- } name
-
-#else
-
-#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
-#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
-
-#endif // ABSL_HAVE_ADDRESS_SANITIZER
-
-// -------------------------------------------------------------------------
-// Undefine the macros intended only for this file.
-
-#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
-#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
-#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
-#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
-#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
-#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
-#undef ABSL_INTERNAL_BEGIN_EXTERN_C
-#undef ABSL_INTERNAL_END_EXTERN_C
-#undef ABSL_INTERNAL_STATIC_INLINE
-
-#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tool
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+// In this case, macros expand to functions implemented by Thread Sanitizer,
+// when building with TSan. When not provided an external implementation,
+// dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "y_absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+ defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#endif
+
+// Memory annotations are also made available to LLVM's Memory Sanitizer
+#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
+#endif
+
+#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
+#endif
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
+#define ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ANNOTATE_THREAD_NAME(name) // empty
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
+
+#include <sanitizer/msan_interface.h>
+
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
+
+#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+#endif
+
+#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
+// reads, while still checking other reads and all writes.
+// See also ANNOTATE_UNPROTECTED_READ.
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+
+#else
+
+#define ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+#else
+
+#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ANNOTATE_IGNORE_READS_BEGIN(); \
+ ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ANNOTATE_IGNORE_WRITES_END(); \
+ ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ANNOTATE_UNPROTECTED_READ(x) \
+ y_absl::base_internal::AnnotateUnprotectedRead(x)
+
+#endif
+
+#else
+
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or TString. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ char x[8] __attribute__((aligned(8))); \
+ } name
+
+#else
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
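Not part of the commit above: a small illustrative sketch of how the memory annotations defined in this header are typically used. Under MemorySanitizer they forward to `__msan_allocated_memory` / `__msan_unpoison`; in every other configuration they compile away. The function names are hypothetical.

```cpp
#include <cstddef>

#include "y_absl/base/internal/dynamic_annotations.h"

void OnBufferFilledExternally(char* buf, size_t n) {
  // The bytes were written by a mechanism MSan cannot observe (for example a
  // raw syscall), so mark them as initialized to avoid false positives.
  ANNOTATE_MEMORY_IS_INITIALIZED(buf, n);
}

void OnBufferRecycled(char* buf, size_t n) {
  // The buffer is going back into a pool; treat its stale contents as
  // uninitialized again so use-before-init bugs are still caught.
  ANNOTATE_MEMORY_IS_UNINITIALIZED(buf, n);
}
```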
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
index 0f7adb8bf6..d11a9a5ae8 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/endian.h
@@ -26,7 +26,7 @@
#endif
#include <cstdint>
-#include "y_absl/base/casts.h"
+#include "y_absl/base/casts.h"
#include "y_absl/base/config.h"
#include "y_absl/base/internal/unaligned_access.h"
#include "y_absl/base/port.h"
@@ -174,36 +174,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
-inline uint8_t FromHost(uint8_t x) { return x; }
-inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
-inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
-inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
-inline uint8_t ToHost(uint8_t x) { return x; }
-inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
-inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
-inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
-
-inline int8_t FromHost(int8_t x) { return x; }
-inline int16_t FromHost(int16_t x) {
- return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t FromHost(int32_t x) {
- return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t FromHost(int64_t x) {
- return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
-}
-inline int8_t ToHost(int8_t x) { return x; }
-inline int16_t ToHost(int16_t x) {
- return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t ToHost(int32_t x) {
- return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t ToHost(int64_t x) {
- return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
-}
-
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -264,36 +264,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
-inline uint8_t FromHost(uint8_t x) { return x; }
-inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
-inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
-inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
-inline uint8_t ToHost(uint8_t x) { return x; }
-inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
-inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
-inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
-
-inline int8_t FromHost(int8_t x) { return x; }
-inline int16_t FromHost(int16_t x) {
- return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t FromHost(int32_t x) {
- return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t FromHost(int64_t x) {
- return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
-}
-inline int8_t ToHost(int8_t x) { return x; }
-inline int16_t ToHost(int16_t x) {
- return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
-}
-inline int32_t ToHost(int32_t x) {
- return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
-}
-inline int64_t ToHost(int64_t x) {
- return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
-}
-
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
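Not part of the commit above: a brief sketch of how these helpers are used. The namespace names (`little_endian`, `big_endian`) follow upstream Abseil's endian.h and are assumed to match this port; the function names in the sketch are illustrative.

```cpp
#include <cstdint>

#include "y_absl/base/internal/endian.h"

uint16_t ReadLittleEndian16(const unsigned char* wire) {
  // Load16 performs an unaligned load and converts from little-endian wire
  // order to host order; on a little-endian host the conversion is a no-op.
  return y_absl::little_endian::Load16(wire);
}

int32_t RoundTrip(int32_t host_value) {
  // The signed overloads bit_cast through the unsigned byte-swap helpers, so
  // FromHost followed by ToHost gives back the original value.
  return y_absl::little_endian::ToHost(
      y_absl::little_endian::FromHost(host_value));
}
```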
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
index c82cba7b4a..1ab47fc376 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/fast_type_id.h
@@ -1,48 +1,48 @@
-//
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
-#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
-
-#include "y_absl/base/config.h"
-
-namespace y_absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-template <typename Type>
-struct FastTypeTag {
- constexpr static char dummy_var = 0;
-};
-
-template <typename Type>
-constexpr char FastTypeTag<Type>::dummy_var;
-
-// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
-// passed-in type. These are meant to be good match for keys into maps or
-// straight up comparisons.
-using FastTypeIdType = const void*;
-
-template <typename Type>
-constexpr inline FastTypeIdType FastTypeId() {
- return &FastTypeTag<Type>::dummy_var;
-}
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace y_absl
-
-#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+ constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. These are meant to be good match for keys into maps or
+// straight up comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+ return &FastTypeTag<Type>::dummy_var;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
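Not part of the commit above: a tiny usage sketch. FastTypeId<T>() yields a distinct `const void*` per type without RTTI, which makes it a cheap map key or identity check.

```cpp
#include "y_absl/base/internal/fast_type_id.h"

struct Foo {};
struct Bar {};

bool DistinctTypeIds() {
  using y_absl::base_internal::FastTypeId;
  static_assert(FastTypeId<Foo>() == FastTypeId<Foo>(),
                "the id for a given type is stable");
  // Different types get different ids, so this comparison is false.
  return FastTypeId<Foo>() == FastTypeId<Bar>();
}
```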
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
index 928666ff44..21f6c309a1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/invoke.h
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-// y_absl::base_internal::invoke(f, args...) is an implementation of
+// y_absl::base_internal::invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard.
//
// [func.require]
@@ -29,7 +29,7 @@
// is not one of the types described in the previous item;
// 5. f(t1, t2, ..., tN) in all other cases.
//
-// The implementation is SFINAE-friendly: substitution failure within invoke()
+// The implementation is SFINAE-friendly: substitution failure within invoke()
// isn't an error.
#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
@@ -170,13 +170,13 @@ struct Invoker {
// The result type of Invoke<F, Args...>.
template <typename F, typename... Args>
-using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...));
// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard.
template <typename F, typename... Args>
-invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}
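Not part of the commit above: an illustrative sketch of the INVOKE cases handled by `y_absl::base_internal::invoke`, matching the numbered list in the header comment. The `Adder` type and function names are hypothetical.

```cpp
#include "y_absl/base/internal/invoke.h"

struct Adder {
  int bias;
  int Add(int x) const { return bias + x; }
};

int InvokeDemo() {
  Adder a{10};
  // Case 1: (t1.*f)(t2, ..., tN) for a pointer to member function.
  int via_member_fn = y_absl::base_internal::invoke(&Adder::Add, a, 5);
  // Case 4: t1.*f for a pointer to data member.
  int via_data_member = y_absl::base_internal::invoke(&Adder::bias, a);
  // Case 5: f(t1, ..., tN) for any other callable.
  int via_callable =
      y_absl::base_internal::invoke([](int x) { return x * 2; }, 3);
  return via_member_fn + via_data_member + via_callable;  // 15 + 10 + 6 = 31
}
```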
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
index 0c477d1b28..e1e439cc04 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc.cc
@@ -598,7 +598,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
section.Leave();
result = &s->levels;
}
- ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+ ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
return result;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make
index fabba74677..77fcf095a4 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc/ya.make
@@ -2,13 +2,13 @@
LIBRARY()
-WITHOUT_LICENSE_TEXTS()
-
-OWNER(
- somov
- g:cpp-contrib
-)
+WITHOUT_LICENSE_TEXTS()
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
LICENSE(Apache-2.0)
PEERDIR(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h
index 61eb4ac643..012b199385 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_scheduling.h
@@ -18,7 +18,7 @@
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
-#include "y_absl/base/internal/raw_logging.h"
+#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/scheduling_mode.h"
#include "y_absl/base/macros.h"
@@ -30,13 +30,13 @@ extern "C" void __google_enable_rescheduling(bool disable_result);
namespace y_absl {
ABSL_NAMESPACE_BEGIN
-class CondVar;
-class Mutex;
-
-namespace synchronization_internal {
-int MutexDelay(int32_t c, int mode);
-} // namespace synchronization_internal
-
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+} // namespace synchronization_internal
+
namespace base_internal {
class SchedulingHelper; // To allow use of SchedulingGuard.
@@ -61,8 +61,8 @@ class SchedulingGuard {
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
- SchedulingGuard(const SchedulingGuard&) = delete;
- SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+ SchedulingGuard(const SchedulingGuard&) = delete;
+ SchedulingGuard& operator=(const SchedulingGuard&) = delete;
private:
// Disable cooperative rescheduling of the calling thread. It may still
@@ -86,23 +86,23 @@ class SchedulingGuard {
bool disabled;
};
- // A scoped helper to enable rescheduling temporarily.
- // REQUIRES: destructor must run in same thread as constructor.
- class ScopedEnable {
- public:
- ScopedEnable();
- ~ScopedEnable();
-
- private:
- int scheduling_disabled_depth_;
- };
-
- // Access to SchedulingGuard is explicitly permitted.
- friend class y_absl::CondVar;
- friend class y_absl::Mutex;
+ // A scoped helper to enable rescheduling temporarily.
+ // REQUIRES: destructor must run in same thread as constructor.
+ class ScopedEnable {
+ public:
+ ScopedEnable();
+ ~ScopedEnable();
+
+ private:
+ int scheduling_disabled_depth_;
+ };
+
+ // Access to SchedulingGuard is explicitly permitted.
+ friend class y_absl::CondVar;
+ friend class y_absl::Mutex;
friend class SchedulingHelper;
friend class SpinLock;
- friend int y_absl::synchronization_internal::MutexDelay(int32_t c, int mode);
+ friend int y_absl::synchronization_internal::MutexDelay(int32_t c, int mode);
};
//------------------------------------------------------------------------------
@@ -121,12 +121,12 @@ inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
return;
}
-inline SchedulingGuard::ScopedEnable::ScopedEnable()
- : scheduling_disabled_depth_(0) {}
-inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
- ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
-}
-
+inline SchedulingGuard::ScopedEnable::ScopedEnable()
+ : scheduling_disabled_depth_(0) {}
+inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
+ ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
+}
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
index ea9a48c2c0..4fe222e723 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.cc
@@ -67,32 +67,32 @@
#undef ABSL_HAVE_RAW_IO
#endif
-namespace y_absl {
-ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
-namespace {
-
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+namespace {
+
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
-// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
-// a selected set of platforms for which we expect not to be able to raw log.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- y_absl::base_internal::AtomicHook<LogPrefixHook>
- log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- y_absl::base_internal::AtomicHook<AbortHook>
- abort_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ y_absl::base_internal::AtomicHook<LogPrefixHook>
+ log_prefix_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ y_absl::base_internal::AtomicHook<AbortHook>
+ abort_hook;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
-constexpr char kTruncated[] = " ... (message truncated)\n";
+constexpr char kTruncated[] = " ... (message truncated)\n";
// sprintf the format to the buffer, adjusting *buf and *size to reflect the
// consumed bytes, and return whether the message fit without truncation. If
// truncation occurred, if possible leave room in the buffer for the message
// kTruncated[].
-bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
- ABSL_PRINTF_ATTRIBUTE(3, 0);
-bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+ ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
int n = vsnprintf(*buf, *size, format, ap);
bool result = true;
if (n < 0 || n > *size) {
@@ -100,7 +100,7 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
n = *size - sizeof(kTruncated); // room for truncation message
} else {
- n = 0; // no room for truncation message
+ n = 0; // no room for truncation message
}
}
*size -= n;
@@ -109,7 +109,7 @@ bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
}
#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
-constexpr int kLogBufSize = 3000;
+constexpr int kLogBufSize = 3000;
// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
// that invoke malloc() and getenv() that might acquire some locks.
@@ -168,7 +168,7 @@ void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- SafeWriteToStderr(buffer, strlen(buffer));
+ SafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
@@ -183,16 +183,16 @@ void RawLogVA(y_absl::LogSeverity severity, const char* file, int line,
}
}
-// Non-formatting version of RawLog().
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-void DefaultInternalLog(y_absl::LogSeverity severity, const char* file, int line,
- const TString& message) {
- RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
- message.data());
-}
-
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(y_absl::LogSeverity severity, const char* file, int line,
+ const TString& message) {
+ RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+ message.data());
+}
+
} // namespace
void SafeWriteToStderr(const char *s, size_t len) {
@@ -225,14 +225,14 @@ bool RawLoggingFullySupported() {
#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
}
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
y_absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
-void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
-
-void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
-
+void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
void RegisterInternalLogFunction(InternalLogFunction func) {
internal_log_function.Store(func);
}
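Not part of the commit above: a sketch of installing a custom internal log sink via RegisterInternalLogFunction, using the InternalLogFunction signature declared in raw_logging.h. It assumes raw_logging.h pulls in the TString definition it uses; the sink name is illustrative.

```cpp
#include <cstdio>

#include "y_absl/base/internal/raw_logging.h"

// A minimal sink that forwards Abseil-internal log messages to stderr.
static void StderrInternalLog(y_absl::LogSeverity severity, const char* file,
                              int line, const TString& message) {
  std::fprintf(stderr, "[%d] %s:%d %s\n", static_cast<int>(severity), file,
               line, message.c_str());
}

void InstallInternalLogSink() {
  y_absl::raw_logging_internal::RegisterInternalLogFunction(StderrInternalLog);
}
```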
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
index 4d5c77003f..cd6603335b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging.h
@@ -72,14 +72,14 @@
//
// The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message.
-#define ABSL_INTERNAL_LOG(severity, message) \
- do { \
- constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
- ::y_absl::raw_logging_internal::internal_log_function( \
- ABSL_RAW_LOGGING_INTERNAL_##severity, \
- absl_raw_logging_internal_filename, __LINE__, message); \
- if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::y_absl::LogSeverity::kFatal) \
- ABSL_INTERNAL_UNREACHABLE; \
+#define ABSL_INTERNAL_LOG(severity, message) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
+ ::y_absl::raw_logging_internal::internal_log_function( \
+ ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_filename, __LINE__, message); \
+ if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::y_absl::LogSeverity::kFatal) \
+ ABSL_INTERNAL_UNREACHABLE; \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
@@ -174,18 +174,18 @@ using InternalLogFunction = void (*)(y_absl::LogSeverity severity,
const char* file, int line,
const TString& message);
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
-// Registers hooks of the above types. Only a single hook of each type may be
-// registered. It is an error to call these functions multiple times with
-// different input arguments.
-//
-// These functions are safe to call at any point during initialization; they do
-// not block or malloc, and are async-signal safe.
-void RegisterLogPrefixHook(LogPrefixHook func);
-void RegisterAbortHook(AbortHook func);
+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);
} // namespace raw_logging_internal
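
A compile-only usage sketch of the two macro families documented above (the function Example and its argument are invented): ABSL_INTERNAL_LOG takes exactly a severity and a single message, while ABSL_RAW_LOG takes a printf-style format plus arguments.

#include "y_absl/base/internal/raw_logging.h"

// Illustration only: these are internal macros, shown here just to make the
// calling conventions in the header above concrete.
void Example(int fd) {
  if (fd < 0) {
    ABSL_RAW_LOG(ERROR, "bad fd: %d", fd);           // printf-style variant
    ABSL_INTERNAL_LOG(WARNING, "fd check failed");   // two-argument variant
  }
}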
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make
index e7cfe7d216..dbfdc41134 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/raw_logging/ya.make
@@ -2,13 +2,13 @@
LIBRARY()
-WITHOUT_LICENSE_TEXTS()
-
-OWNER(
- somov
- g:cpp-contrib
-)
+WITHOUT_LICENSE_TEXTS()
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
LICENSE(Apache-2.0)
PEERDIR(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
index 2ee7cde432..ee7e4476c1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.cc
@@ -66,13 +66,13 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
submit_profile_data.Store(fn);
}
-// Static member variable definitions.
-constexpr uint32_t SpinLock::kSpinLockHeld;
-constexpr uint32_t SpinLock::kSpinLockCooperative;
-constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
-constexpr uint32_t SpinLock::kSpinLockSleeper;
-constexpr uint32_t SpinLock::kWaitTimeMask;
-
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
@@ -105,14 +105,14 @@ void SpinLock::SlowLock() {
if ((lock_value & kSpinLockHeld) == 0) {
return;
}
-
- base_internal::SchedulingMode scheduling_mode;
- if ((lock_value & kSpinLockCooperative) != 0) {
- scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
- } else {
- scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
- }
-
+
+ base_internal::SchedulingMode scheduling_mode;
+ if ((lock_value & kSpinLockCooperative) != 0) {
+ scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+ } else {
+ scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+ }
+
// The lock was not obtained initially, so this thread needs to wait for
// it. Record the current timestamp in the local variable wait_start_time
// so the total wait time can be stored in the lockword once this thread
@@ -125,9 +125,9 @@ void SpinLock::SlowLock() {
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
- // lock wait time in the lock -- the lock word stores the amount of time
- // that the current holder waited before acquiring the lock, not the wait
- // time of any thread currently waiting to acquire it.
+ // lock wait time in the lock -- the lock word stores the amount of time
+ // that the current holder waited before acquiring the lock, not the wait
+ // time of any thread currently waiting to acquire it.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -141,14 +141,14 @@ void SpinLock::SlowLock() {
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
- } else if ((lock_value & kWaitTimeMask) == 0) {
- // The lock is still held, without a waiter being marked, but something
- // else about the lock word changed, causing our CAS to fail. For
- // example, a new lock holder may have acquired the lock with
- // kSpinLockDisabledScheduling set, whereas the previous holder had not
- // set that flag. In this case, attempt again to mark ourselves as a
- // waiter.
- continue;
+ } else if ((lock_value & kWaitTimeMask) == 0) {
+ // The lock is still held, without a waiter being marked, but something
+ // else about the lock word changed, causing our CAS to fail. For
+ // example, a new lock holder may have acquired the lock with
+ // kSpinLockDisabledScheduling set, whereas the previous holder had not
+ // set that flag. In this case, attempt again to mark ourselves as a
+ // waiter.
+ continue;
}
}
@@ -185,32 +185,32 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
// We use the upper 29 bits of the lock word to store the time spent waiting to
// acquire this lock. This is reported by contentionz profiling. Since the
// lower bits of the cycle counter wrap very quickly on high-frequency
-// processors we divide to reduce the granularity to 2^kProfileTimestampShift
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
// sized units. On a 4Ghz machine this will lose track of wait times greater
// than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
-static constexpr int kProfileTimestampShift = 7;
-
-// We currently reserve the lower 3 bits.
-static constexpr int kLockwordReservedShift = 3;
+static constexpr int kProfileTimestampShift = 7;
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
+
uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time) {
static const int64_t kMaxWaitTime =
- std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
+ std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
int64_t scaled_wait_time =
- (wait_end_time - wait_start_time) >> kProfileTimestampShift;
+ (wait_end_time - wait_start_time) >> kProfileTimestampShift;
// Return a representation of the time spent waiting that can be stored in
// the lock word's upper bits.
uint32_t clamped = static_cast<uint32_t>(
- std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
+ std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
if (clamped == 0) {
return kSpinLockSleeper; // Just wake waiters, but don't record contention.
}
// Bump up value if necessary to avoid returning kSpinLockSleeper.
const uint32_t kMinWaitTime =
- kSpinLockSleeper + (1 << kLockwordReservedShift);
+ kSpinLockSleeper + (1 << kLockwordReservedShift);
if (clamped == kSpinLockSleeper) {
return kMinWaitTime;
}
@@ -221,7 +221,7 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
// Cast to uint32_t first to ensure bits [63:32] are cleared.
const uint64_t scaled_wait_time =
static_cast<uint32_t>(lock_value & kWaitTimeMask);
- return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
+ return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
} // namespace base_internal
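
The wait-time packing used by EncodeWaitCycles()/DecodeWaitCycles() above can be re-derived in isolation. The sketch below copies the constants from the hunk (shift by 7 before storing, reserve the low 3 flag bits, keep kSpinLockSleeper distinct) but uses its own function names; it is illustrative, not the library's code.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

constexpr int kProfileTimestampShift = 7;   // granularity: 128 cycles
constexpr int kLockwordReservedShift = 3;   // low bits hold lock flags
constexpr uint32_t kSpinLockSleeper = 8;

// Packs a wait time (in cycles) into the upper 29 bits of a lock word.
uint32_t Encode(int64_t wait_cycles) {
  const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  const int64_t scaled = wait_cycles >> kProfileTimestampShift;
  const uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) return kSpinLockSleeper;  // wake waiters, record nothing
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  return clamped == kSpinLockSleeper ? kMinWaitTime : clamped;
}

// Recovers the (rounded-down) wait time from the stored bits.
uint64_t Decode(uint32_t lock_value) {
  const uint32_t kWaitTimeMask = ~uint32_t{7};  // everything above the flag bits
  const uint64_t scaled = lock_value & kWaitTimeMask;
  return scaled << (kProfileTimestampShift - kLockwordReservedShift);
}

int main() {
  const int64_t waited = 1000000;  // cycles spent waiting
  const uint32_t bits = Encode(waited);
  printf("stored=0x%x decoded=%llu (multiple of %d cycles)\n", bits,
         static_cast<unsigned long long>(Decode(bits)),
         1 << kProfileTimestampShift);  // prints decoded=999936
  return 0;
}

The decoded value is the original wait rounded down to a multiple of 2^7 = 128 cycles, which is the granularity trade-off the comment above describes.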
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
index ef88cb52c0..3b636840d2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock.h
@@ -15,7 +15,7 @@
//
// Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in two situations:
+// SpinLock is provided for use in two situations:
// - for use by Abseil internal code that Mutex itself depends on
// - for async signal safety (see below)
@@ -35,7 +35,7 @@
#include <atomic>
#include "y_absl/base/attributes.h"
-#include "y_absl/base/const_init.h"
+#include "y_absl/base/const_init.h"
#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/internal/low_level_scheduling.h"
#include "y_absl/base/internal/raw_logging.h"
@@ -59,18 +59,18 @@ class ABSL_LOCKABLE SpinLock {
// inside thread schedulers. Normal clients should not use these.
explicit SpinLock(base_internal::SchedulingMode mode);
- // Constructor for global SpinLock instances. See y_absl/base/const_init.h.
- constexpr SpinLock(y_absl::ConstInitType, base_internal::SchedulingMode mode)
- : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
-
- // For global SpinLock instances prefer trivial destructor when possible.
- // Default but non-trivial destructor in some build configurations causes an
- // extra static initializer.
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+ // Constructor for global SpinLock instances. See y_absl/base/const_init.h.
+ constexpr SpinLock(y_absl::ConstInitType, base_internal::SchedulingMode mode)
+ : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+ // For global SpinLock instances prefer trivial destructor when possible.
+ // Default but non-trivial destructor in some build configurations causes an
+ // extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
-#else
- ~SpinLock() = default;
-#endif
+#else
+ ~SpinLock() = default;
+#endif
// Acquire this SpinLock.
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
@@ -139,27 +139,27 @@ class ABSL_LOCKABLE SpinLock {
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
- // bit[2] encodes whether the current lock holder disabled scheduling when
- // acquiring the lock. Only set when kSpinLockHeld is also set.
+ // bit[2] encodes whether the current lock holder disabled scheduling when
+ // acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
- // This is set by the lock holder to indicate how long it waited on
- // the lock before eventually acquiring it. The number of cycles is
- // encoded as a 29-bit unsigned int, or in the case that the current
- // holder did not wait but another waiter is queued, the LSB
- // (kSpinLockSleeper) is set. The implementation does not explicitly
- // track the number of queued waiters beyond this. It must always be
- // assumed that waiters may exist if the current holder was required to
- // queue.
- //
- // Invariant: if the lock is not held, the value is either 0 or
- // kSpinLockCooperative.
- static constexpr uint32_t kSpinLockHeld = 1;
- static constexpr uint32_t kSpinLockCooperative = 2;
- static constexpr uint32_t kSpinLockDisabledScheduling = 4;
- static constexpr uint32_t kSpinLockSleeper = 8;
- // Includes kSpinLockSleeper.
- static constexpr uint32_t kWaitTimeMask =
- ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+ // This is set by the lock holder to indicate how long it waited on
+ // the lock before eventually acquiring it. The number of cycles is
+ // encoded as a 29-bit unsigned int, or in the case that the current
+ // holder did not wait but another waiter is queued, the LSB
+ // (kSpinLockSleeper) is set. The implementation does not explicitly
+ // track the number of queued waiters beyond this. It must always be
+ // assumed that waiters may exist if the current holder was required to
+ // queue.
+ //
+ // Invariant: if the lock is not held, the value is either 0 or
+ // kSpinLockCooperative.
+ static constexpr uint32_t kSpinLockHeld = 1;
+ static constexpr uint32_t kSpinLockCooperative = 2;
+ static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+ static constexpr uint32_t kSpinLockSleeper = 8;
+ // Includes kSpinLockSleeper.
+ static constexpr uint32_t kWaitTimeMask =
+ ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
// Returns true if the provided scheduling mode is cooperative.
static constexpr bool IsCooperative(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc
index 69955dc765..bf2bfcac7c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_akaros.inc
@@ -20,7 +20,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
int /* loop */, y_absl::base_internal::SchedulingMode /* mode */) {
// In Akaros, one must take care not to call anything that could cause a
@@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
// arbitrary code.
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
index 5b4480d133..a2237a3f48 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_linux.inc
@@ -46,17 +46,17 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
#endif
#endif
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
y_absl::base_internal::SchedulingMode) {
y_absl::base_internal::ErrnoSaver errno_saver;
@@ -66,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t> *w, bool all) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t> *w, bool all) {
syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc
index 12a9b86599..0c692f2d3d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_posix.inc
@@ -25,7 +25,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
y_absl::base_internal::SchedulingMode /* mode */) {
y_absl::base_internal::ErrnoSaver errno_saver;
@@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
}
}
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h
index 2e34d7026b..782361d0c0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait.h
@@ -45,16 +45,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
-// is true, wake all such threads. On some systems, this may be a no-op; on
-// those systems, threads calling SpinLockDelay() will always wake eventually
-// even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a call to SpinLockWake(w).
+// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
@@ -73,23 +73,23 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
- bool all);
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+ bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
y_absl::base_internal::SchedulingMode scheduling_mode);
}
inline void y_absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
bool all) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
}
inline void y_absl::base_internal::SpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
y_absl::base_internal::SchedulingMode scheduling_mode) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
- (w, value, loop, scheduling_mode);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+ (w, value, loop, scheduling_mode);
}
#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
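
For orientation, here is a compile-only, hedged sketch of how a caller could build a tiny blocking flag out of the two extension points declared above. The flag protocol (0 = clear, 1 = set) and the namespace `example` are invented for illustration; SCHEDULE_KERNEL_ONLY is used as it is in spinlock.cc.

#include <atomic>
#include <cstdint>

#include "y_absl/base/internal/spinlock_wait.h"

namespace example {

// Spins on *flag, letting SpinLockDelay() decide whether to spin, yield, or
// sleep (e.g. in a futex) based on the iteration count.
void WaitUntilSet(std::atomic<uint32_t>* flag) {
  uint32_t observed;
  for (int loop = 0; (observed = flag->load(std::memory_order_acquire)) == 0;
       ++loop) {
    y_absl::base_internal::SpinLockDelay(
        flag, observed, loop, y_absl::base_internal::SCHEDULE_KERNEL_ONLY);
  }
}

// Publishes the flag and wakes any sleepers parked by SpinLockDelay().
void Set(std::atomic<uint32_t>* flag) {
  flag->store(1, std::memory_order_release);
  y_absl::base_internal::SpinLockWake(flag, /*all=*/true);
}

}  // namespace example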
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make
index 902ffe394f..a31df9455b 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_wait/ya.make
@@ -2,13 +2,13 @@
LIBRARY()
-WITHOUT_LICENSE_TEXTS()
-
-OWNER(
- somov
- g:cpp-contrib
-)
+WITHOUT_LICENSE_TEXTS()
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
LICENSE(Apache-2.0)
ADDINCL(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc
index 648f74134f..3d5d662335 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/spinlock_win32.inc
@@ -20,9 +20,9 @@
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
- y_absl::base_internal::SchedulingMode /* mode */) {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ y_absl::base_internal::SchedulingMode /* mode */) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
@@ -31,7 +31,7 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
}
}
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
- std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc
index fe50d84a03..5d8deec11a 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.cc
@@ -1,88 +1,88 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "y_absl/base/internal/strerror.h"
-
-#include <array>
-#include <cerrno>
-#include <cstddef>
-#include <cstdio>
-#include <cstring>
-#include <util/generic/string.h>
-#include <type_traits>
-
-#include "y_absl/base/internal/errno_saver.h"
-
-namespace y_absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-namespace {
-
-const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
-#if defined(_WIN32)
- int rc = strerror_s(buf, buflen, errnum);
- buf[buflen - 1] = '\0'; // guarantee NUL termination
- if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
- return buf;
-#else
- // The type of `ret` is platform-specific; both of these branches must compile
- // either way but only one will execute on any given platform:
- auto ret = strerror_r(errnum, buf, buflen);
- if (std::is_same<decltype(ret), int>::value) {
- // XSI `strerror_r`; `ret` is `int`:
- if (ret) *buf = '\0';
- return buf;
- } else {
- // GNU `strerror_r`; `ret` is `char *`:
- return reinterpret_cast<const char*>(ret);
- }
-#endif
-}
-
-TString StrErrorInternal(int errnum) {
- char buf[100];
- const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
- if (*str == '\0') {
- snprintf(buf, sizeof buf, "Unknown error %d", errnum);
- str = buf;
- }
- return str;
-}
-
-// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
-// to `StrErrorAdaptor()` if the value is larger than this.
-constexpr int kSysNerr = 135;
-
-std::array<TString, kSysNerr>* NewStrErrorTable() {
- auto* table = new std::array<TString, kSysNerr>;
- for (int i = 0; i < static_cast<int>(table->size()); ++i) {
- (*table)[i] = StrErrorInternal(i);
- }
- return table;
-}
-
-} // namespace
-
-TString StrError(int errnum) {
- y_absl::base_internal::ErrnoSaver errno_saver;
- static const auto* table = NewStrErrorTable();
- if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
- return (*table)[errnum];
- }
- return StrErrorInternal(errnum);
-}
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace y_absl
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "y_absl/base/internal/strerror.h"
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <util/generic/string.h>
+#include <type_traits>
+
+#include "y_absl/base/internal/errno_saver.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+ int rc = strerror_s(buf, buflen, errnum);
+ buf[buflen - 1] = '\0'; // guarantee NUL termination
+ if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+ return buf;
+#else
+ // The type of `ret` is platform-specific; both of these branches must compile
+ // either way but only one will execute on any given platform:
+ auto ret = strerror_r(errnum, buf, buflen);
+ if (std::is_same<decltype(ret), int>::value) {
+ // XSI `strerror_r`; `ret` is `int`:
+ if (ret) *buf = '\0';
+ return buf;
+ } else {
+ // GNU `strerror_r`; `ret` is `char *`:
+ return reinterpret_cast<const char*>(ret);
+ }
+#endif
+}
+
+TString StrErrorInternal(int errnum) {
+ char buf[100];
+ const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+ if (*str == '\0') {
+ snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+ str = buf;
+ }
+ return str;
+}
+
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<TString, kSysNerr>* NewStrErrorTable() {
+ auto* table = new std::array<TString, kSysNerr>;
+ for (int i = 0; i < static_cast<int>(table->size()); ++i) {
+ (*table)[i] = StrErrorInternal(i);
+ }
+ return table;
+}
+
+} // namespace
+
+TString StrError(int errnum) {
+ y_absl::base_internal::ErrnoSaver errno_saver;
+ static const auto* table = NewStrErrorTable();
+ if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
+ return (*table)[errnum];
+ }
+ return StrErrorInternal(errnum);
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h
index a80a7b9c35..00479ab9eb 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/strerror.h
@@ -1,39 +1,39 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
-#define ABSL_BASE_INTERNAL_STRERROR_H_
-
-#include <util/generic/string.h>
-
-#include "y_absl/base/config.h"
-
-namespace y_absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-// A portable and thread-safe alternative to C89's `strerror`.
-//
-// The C89 specification of `strerror` is not suitable for use in a
-// multi-threaded application as the returned string may be changed by calls to
-// `strerror` from another thread. The many non-stdlib alternatives differ
-// enough in their names, availability, and semantics to justify this wrapper
-// around them. `errno` will not be modified by a call to `y_absl::StrError`.
-TString StrError(int errnum);
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace y_absl
-
-#endif // ABSL_BASE_INTERNAL_STRERROR_H_
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <util/generic/string.h>
+
+#include "y_absl/base/config.h"
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread. The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them. `errno` will not be modified by a call to `y_absl::StrError`.
+TString StrError(int errnum);
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_BASE_INTERNAL_STRERROR_H_
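
A short usage sketch for the wrapper declared above, assuming a POSIX system for open(); the path is deliberately bogus. Unlike strerror(), the returned TString is owned by the caller, and errno is left unchanged by the call.

#include <fcntl.h>

#include <cerrno>
#include <cstdio>

#include "y_absl/base/internal/strerror.h"

int main() {
  if (open("/definitely/not/a/real/path", O_RDONLY) == -1) {
    const TString msg = y_absl::base_internal::StrError(errno);
    fprintf(stderr, "open failed: %s\n", msg.c_str());
  }
  return 0;
}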
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
index 9eb0cf3f8c..ab4bcee26c 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.cc
@@ -39,7 +39,7 @@
#endif
#include <string.h>
-
+
#include <cassert>
#include <cstdint>
#include <cstdio>
@@ -51,11 +51,11 @@
#include <vector>
#include "y_absl/base/call_once.h"
-#include "y_absl/base/config.h"
+#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/spinlock.h"
#include "y_absl/base/internal/unscaledcycleclock.h"
-#include "y_absl/base/thread_annotations.h"
+#include "y_absl/base/thread_annotations.h"
namespace y_absl {
ABSL_NAMESPACE_BEGIN
@@ -144,12 +144,12 @@ static int GetNumCPUs() {
#if defined(_WIN32)
static double GetNominalCPUFrequency() {
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
- !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
- // UWP apps don't have access to the registry and currently don't provide an
- // API informing about CPU nominal frequency.
- return 1.0;
-#else
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+ // UWP apps don't have access to the registry and currently don't provide an
+ // API informing about CPU nominal frequency.
+ return 1.0;
+#else
#pragma comment(lib, "advapi32.lib") // For Reg* functions.
HKEY key;
// Use the Reg* functions rather than the SH functions because shlwapi.dll
@@ -169,7 +169,7 @@ static double GetNominalCPUFrequency() {
}
}
return 1.0;
-#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
@@ -415,16 +415,16 @@ pid_t GetTID() {
#else
// Fallback implementation of GetTID using pthread_getspecific.
-ABSL_CONST_INIT static once_flag tid_once;
-ABSL_CONST_INIT static pthread_key_t tid_key;
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock tid_lock(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static once_flag tid_once;
+ABSL_CONST_INIT static pthread_key_t tid_key;
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock tid_lock(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// We set a bit per thread in this array to indicate that an ID is in
// use. ID 0 is unused because it is the default value returned by
// pthread_getspecific().
-ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
- ABSL_GUARDED_BY(tid_lock) = nullptr;
+ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
+ ABSL_GUARDED_BY(tid_lock) = nullptr;
static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
// Returns the TID to tid_array.
@@ -491,18 +491,18 @@ pid_t GetTID() {
#endif
-// GetCachedTID() caches the thread ID in thread-local storage (which is a
-// userspace construct) to avoid unnecessary system calls. Without this caching,
-// it can take roughly 98ns, while it takes roughly 1ns with this caching.
-pid_t GetCachedTID() {
-#ifdef ABSL_HAVE_THREAD_LOCAL
- static thread_local pid_t thread_id = GetTID();
- return thread_id;
-#else
- return GetTID();
-#endif // ABSL_HAVE_THREAD_LOCAL
-}
-
+// GetCachedTID() caches the thread ID in thread-local storage (which is a
+// userspace construct) to avoid unnecessary system calls. Without this caching,
+// it can take roughly 98ns, while it takes roughly 1ns with this caching.
+pid_t GetCachedTID() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+ static thread_local pid_t thread_id = GetTID();
+ return thread_id;
+#else
+ return GetTID();
+#endif // ABSL_HAVE_THREAD_LOCAL
+}
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace y_absl
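
GetCachedTID() above is an instance of a general pattern: cache a value that is expensive to query but fixed for the thread's lifetime in a function-local thread_local. The sketch below applies the same idea to getpid() purely for illustration (names are ad hoc); like GetCachedTID(), such a cache must not be relied on right after fork(), since the cached value goes stale in the child.

#include <unistd.h>

#include <cstdio>

// The first call on each thread pays for the system call; later calls are a
// plain thread-local read.
pid_t CachedPid() {
  static thread_local pid_t pid = getpid();
  return pid;
}

int main() {
  printf("pid=%ld\n", static_cast<long>(CachedPid()));
  return 0;
}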
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h
index 0fd7207a38..bb20639029 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/sysinfo.h
@@ -30,7 +30,7 @@
#include <cstdint>
-#include "y_absl/base/config.h"
+#include "y_absl/base/config.h"
#include "y_absl/base/port.h"
namespace y_absl {
@@ -60,13 +60,13 @@ using pid_t = uint32_t;
#endif
pid_t GetTID();
-// Like GetTID(), but caches the result in thread-local storage in order
-// to avoid unnecessary system calls. Note that there are some cases where
-// one must call through to GetTID directly, which is why this exists as a
-// separate function. For example, GetCachedTID() is not safe to call in
-// an asynchronous signal-handling context nor right after a call to fork().
-pid_t GetCachedTID();
-
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace y_absl
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
index b5e88ae302..5182ae164d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.cc
@@ -23,7 +23,7 @@
#include <cassert>
#include <memory>
-#include "y_absl/base/attributes.h"
+#include "y_absl/base/attributes.h"
#include "y_absl/base/call_once.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/spinlock.h"
@@ -54,11 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// exist within a process (via dlopen() or similar), references to
// thread_identity_ptr from each instance of the code will refer to
// *different* instances of this ptr.
-// Apple platforms have the visibility attribute, but issue a compile warning
-// that protected visibility is unsupported.
-#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected")))
-#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
@@ -120,10 +120,10 @@ void SetCurrentThreadIdentity(
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
// Please see the comment on `CurrentThreadIdentityIfPresent` in
-// thread_identity.h. When we cannot expose thread_local variables in
-// headers, we opt for the correct-but-slower option of not inlining this
-// function.
-#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
#endif
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
index 09a6c0bce1..9e9da41303 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/thread_identity.h
@@ -32,7 +32,7 @@
#include "y_absl/base/config.h"
#include "y_absl/base/internal/per_thread_tls.h"
-#include "y_absl/base/optimization.h"
+#include "y_absl/base/optimization.h"
namespace y_absl {
ABSL_NAMESPACE_BEGIN
@@ -70,28 +70,28 @@ struct PerThreadSynch {
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
- bool wake; // This thread is to be woken from a Mutex.
- // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
- // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
- //
- // The value of "x->cond_waiter" is meaningless if "x" is not on a
- // Mutex waiter list.
- bool cond_waiter;
- bool maybe_unlocking; // Valid at head of Mutex waiter queue;
- // true if UnlockSlow could be searching
- // for a waiter to wake. Used for an optimization
- // in Enqueue(). true is always a valid value.
- // Can be reset to false when the unlocker or any
- // writer releases the lock, or a reader fully
- // releases the lock. It may not be set to false
- // by a reader that decrements the count to
- // non-zero. protected by mutex spinlock
- bool suppress_fatal_errors; // If true, try to proceed even in the face
- // of broken invariants. This is used within
- // fatal signal handlers to improve the
- // chances of debug logging information being
- // output successfully.
- int priority; // Priority of thread (updated every so often).
+ bool wake; // This thread is to be woken from a Mutex.
+ // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+ // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+ //
+ // The value of "x->cond_waiter" is meaningless if "x" is not on a
+ // Mutex waiter list.
+ bool cond_waiter;
+ bool maybe_unlocking; // Valid at head of Mutex waiter queue;
+ // true if UnlockSlow could be searching
+ // for a waiter to wake. Used for an optimization
+ // in Enqueue(). true is always a valid value.
+ // Can be reset to false when the unlocker or any
+ // writer releases the lock, or a reader fully
+ // releases the lock. It may not be set to false
+ // by a reader that decrements the count to
+ // non-zero. protected by mutex spinlock
+ bool suppress_fatal_errors; // If true, try to proceed even in the face
+ // of broken invariants. This is used within
+ // fatal signal handlers to improve the
+ // chances of debug logging information being
+ // output successfully.
+ int priority; // Priority of thread (updated every so often).
// State values:
// kAvailable: This PerThreadSynch is available.
@@ -110,30 +110,30 @@ struct PerThreadSynch {
};
std::atomic<State> state;
- // The wait parameters of the current wait. waitp is null if the
- // thread is not waiting. Transitions from null to non-null must
- // occur before the enqueue commit point (state = kQueued in
- // Enqueue() and CondVarEnqueue()). Transitions from non-null to
- // null must occur after the wait is finished (state = kAvailable in
- // Mutex::Block() and CondVar::WaitCommon()). This field may be
- // changed only by the thread that describes this PerThreadSynch. A
- // special case is Fer(), which calls Enqueue() on another thread,
- // but with an identical SynchWaitParams pointer, thus leaving the
- // pointer unchanged.
- SynchWaitParams* waitp;
+ // The wait parameters of the current wait. waitp is null if the
+ // thread is not waiting. Transitions from null to non-null must
+ // occur before the enqueue commit point (state = kQueued in
+ // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+ // null must occur after the wait is finished (state = kAvailable in
+ // Mutex::Block() and CondVar::WaitCommon()). This field may be
+ // changed only by the thread that describes this PerThreadSynch. A
+ // special case is Fer(), which calls Enqueue() on another thread,
+ // but with an identical SynchWaitParams pointer, thus leaving the
+ // pointer unchanged.
+ SynchWaitParams* waitp;
- intptr_t readers; // Number of readers in mutex.
+ intptr_t readers; // Number of readers in mutex.
- // When priority will next be read (cycles).
- int64_t next_priority_read_cycles;
+ // When priority will next be read (cycles).
+ int64_t next_priority_read_cycles;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks;
};
-// The instances of this class are allocated in NewThreadIdentity() with an
-// alignment of PerThreadSynch::kAlignment.
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
@@ -143,7 +143,7 @@ struct ThreadIdentity {
// Private: Reserved for y_absl::synchronization_internal::Waiter.
struct WaiterState {
- alignas(void*) char data[128];
+ alignas(void*) char data[128];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -211,9 +211,9 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
-#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
@@ -236,18 +236,18 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#error Thread-local storage not detected on this platform
#endif
-// thread_local variables cannot be in headers exposed by DLLs or in certain
-// build configurations on Apple platforms. However, it is important for
-// performance reasons in general that `CurrentThreadIdentityIfPresent` be
-// inlined. In the other cases we opt to have the function not be inlined. Note
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt to have the function not be inlined. Note
// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
-// this entire inline definition.
-#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
- !defined(ABSL_CONSUME_DLL)
-#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
-#endif
-
-#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
return thread_identity_ptr;
}
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
index dcce5aedc3..489993d30d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate.cc
@@ -18,7 +18,7 @@
#include <functional>
#include <new>
#include <stdexcept>
-
+
#include "y_absl/base/config.h"
#include "y_absl/base/internal/raw_logging.h"
@@ -26,186 +26,186 @@ namespace y_absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-// NOTE: The various STL exception throwing functions are placed within the
-// #ifdef blocks so the symbols aren't exposed on platforms that don't support
-// them, such as the Android NDK. For example, ANGLE fails to link when building
-// within AOSP without them, since the STL functions don't exist.
+// NOTE: The various STL exception throwing functions are placed within the
+// #ifdef blocks so the symbols aren't exposed on platforms that don't support
+// them, such as the Android NDK. For example, ANGLE fails to link when building
+// within AOSP without them, since the STL functions don't exist.
namespace {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
template <typename T>
[[noreturn]] void Throw(const T& error) {
throw error;
-}
+}
#endif
} // namespace
void ThrowStdLogicError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLogicError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdDomainError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdDomainError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdLengthError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLengthError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRangeError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRangeError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const TString& what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
- std::abort();
-#endif
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const char* what_arg) {
-#ifdef ABSL_HAVE_EXCEPTIONS
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
-#else
- ABSL_RAW_LOG(FATAL, "%s", what_arg);
- std::abort();
-#endif
-}
-
-void ThrowStdBadFunctionCall() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_function_call());
-#else
- std::abort();
-#endif
-}
-
-void ThrowStdBadAlloc() {
-#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_alloc());
-#else
- std::abort();
-#endif
-}
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
+}
+
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_function_call());
+#else
+ std::abort();
+#endif
+}
+
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_alloc());
+#else
+ std::abort();
+#endif
+}
} // namespace base_internal
ABSL_NAMESPACE_END
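
A hedged usage sketch for these delegates (the container TinyArray is invented, and the include path is assumed to be the throw_delegate.h that matches this .cc): routing the failure through ThrowStdOutOfRange() keeps the bounds check identical whether the build has exceptions or, as the #else branches above show, logs via ABSL_RAW_LOG(FATAL) and aborts.

#include <cstddef>

#include "y_absl/base/internal/throw_delegate.h"

template <typename T, std::size_t N>
struct TinyArray {
  T data[N];
  T& at(std::size_t i) {
    // Out-of-range access throws std::out_of_range, or aborts when
    // exceptions are disabled.
    if (i >= N) y_absl::base_internal::ThrowStdOutOfRange("TinyArray::at");
    return data[i];
  }
};

int main() {
  TinyArray<int, 4> a{{1, 2, 3, 4}};
  return a.at(2);  // a.at(7) would fail the bounds check
}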
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make
index 1b956ad494..843d4bf838 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/throw_delegate/ya.make
@@ -2,13 +2,13 @@
LIBRARY()
-WITHOUT_LICENSE_TEXTS()
-
-OWNER(
- somov
- g:cpp-contrib
-)
+WITHOUT_LICENSE_TEXTS()
+OWNER(
+ somov
+ g:cpp-contrib
+)
+
LICENSE(Apache-2.0)
PEERDIR(
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h
index 69a61d0814..3777338512 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/tsan_mutex_interface.h
@@ -19,8 +19,8 @@
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
-#include "y_absl/base/config.h"
-
+#include "y_absl/base/config.h"
+
// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Macro intended only for internal use.
//
@@ -30,7 +30,7 @@
#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
#endif
-#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>)
#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
#endif
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
index 072a9852fa..f3035252a5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.cc
@@ -139,7 +139,7 @@ double UnscaledCycleClock::Frequency() {
#pragma intrinsic(__rdtsc)
-int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
index 618c5c7e73..ef3df43616 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/unscaledcycleclock.h
@@ -15,8 +15,8 @@
// UnscaledCycleClock
// An UnscaledCycleClock yields the value and frequency of a cycle counter
// that increments at a rate that is approximately constant.
-// This class is for internal use only, you should consider using CycleClock
-// instead.
+// This class is for internal use only, you should consider using CycleClock
+// instead.
//
// Notes:
// The cycle counter frequency is not necessarily the core clock frequency.
@@ -109,7 +109,7 @@ class UnscaledCycleClock {
// value.
static double Frequency();
- // Allowed users
+ // Allowed users
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;