author     ayles <ayles@yandex-team.ru>                  2022-02-10 16:46:11 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:11 +0300
commit     d55028e9d9708f94c2d0d35b54ed50d4d7af0456 (patch)
tree       d69fdff3b2be7d190a1efa078721d25139d0b030 /contrib/restricted/abseil-cpp/absl/synchronization
parent     baa58daefa91fde4b4769facdbd2903763b9c6a8 (diff)
download   ydb-d55028e9d9708f94c2d0d35b54ed50d4d7af0456.tar.gz

Restoring authorship annotation for <ayles@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/synchronization')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/barrier.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc | 10
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h | 278
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc | 10
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h | 8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h | 62
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h | 6
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc | 42
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h | 16
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make | 8
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc | 156
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/mutex.h | 30
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/notification.cc | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/notification.h | 4
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/ya.make | 8

21 files changed, 335 insertions(+), 335 deletions(-)
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
index 0dfd795e7b..c9fd3a201c 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
@@ -18,7 +18,7 @@
#include "absl/synchronization/mutex.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
// Return whether int *arg is zero.
static bool IsZero(void *arg) {
@@ -48,5 +48,5 @@ bool Barrier::Block() {
return this->num_to_exit_ == 0;
}
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
index d8e754406f..0fc25f561c 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
@@ -23,7 +23,7 @@
#include "absl/synchronization/mutex.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
// Barrier
//
@@ -74,6 +74,6 @@ class Barrier {
int num_to_exit_ ABSL_GUARDED_BY(lock_);
};
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_BARRIER_H_
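
For context only (not part of this commit): the barrier shown above is used by having every participating thread call Block(); exactly one caller gets `true` back and may then destroy the barrier. A minimal usage sketch, with an arbitrary thread count:

    #include <thread>
    #include <vector>

    #include "absl/synchronization/barrier.h"

    int main() {
      constexpr int kThreads = 4;
      auto* barrier = new absl::Barrier(kThreads);
      std::vector<std::thread> threads;
      for (int i = 0; i < kThreads; ++i) {
        threads.emplace_back([barrier] {
          // Block() returns true for exactly one caller, which may then
          // delete the barrier once all participants have arrived.
          if (barrier->Block()) delete barrier;
        });
      }
      for (auto& t : threads) t.join();
    }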
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
index d2f82da3bb..345efa93fe 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
@@ -19,7 +19,7 @@
#include "absl/base/internal/raw_logging.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace {
@@ -63,5 +63,5 @@ void BlockingCounter::Wait() {
// after we return from this method.
}
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
index 1908fdb1d9..53799a9892 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
@@ -26,7 +26,7 @@
#include "absl/synchronization/mutex.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
// BlockingCounter
//
@@ -95,7 +95,7 @@ class BlockingCounter {
bool done_ ABSL_GUARDED_BY(lock_);
};
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
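
For context only (not part of this commit): a BlockingCounter lets one thread Wait() until a known number of workers have each called DecrementCount() exactly once. A minimal usage sketch, with placeholder work:

    #include <thread>
    #include <vector>

    #include "absl/synchronization/blocking_counter.h"

    int main() {
      constexpr int kWorkers = 3;
      absl::BlockingCounter done(kWorkers);
      std::vector<std::thread> workers;
      for (int i = 0; i < kWorkers; ++i) {
        workers.emplace_back([&done] {
          // ... do some work ...
          done.DecrementCount();  // each worker signals completion exactly once
        });
      }
      done.Wait();  // returns once the count has reached zero
      for (auto& t : workers) t.join();
    }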
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
index 53a71b342b..49550a531f 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
@@ -27,14 +27,14 @@
#include "absl/synchronization/internal/per_thread_sem.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// ThreadIdentity storage is persistent, we maintain a free-list of previously
// released ThreadIdentity objects.
-ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
- absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
+ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage we cache them for re-use.
@@ -134,7 +134,7 @@ base_internal::ThreadIdentity* CreateThreadIdentity() {
}
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
index e121f68377..2d363223a4 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
@@ -29,7 +29,7 @@
#include "absl/base/port.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// Allocates and attaches a ThreadIdentity object for the calling thread.
@@ -54,7 +54,7 @@ inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
}
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
index 06fbd6d072..40eae732f6 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/futex.h
@@ -1,43 +1,43 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
#ifdef ABSL_INTERNAL_HAVE_FUTEX
#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
#elif defined(__BIONIC__)
@@ -51,104 +51,104 @@
#ifdef ABSL_INTERNAL_HAVE_FUTEX
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_WAIT_BITSET
-#define FUTEX_WAIT_BITSET 9
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#ifndef FUTEX_CLOCK_REALTIME
-#define FUTEX_CLOCK_REALTIME 256
-#endif
-#ifndef FUTEX_BITSET_MATCH_ANY
-#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
-#endif
-#endif
-
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
-class FutexImpl {
- public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- int err = 0;
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (ABSL_PREDICT_FALSE(err != 0)) {
- err = -errno;
- }
- return err;
- }
-
- static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
- int32_t bits,
- const struct timespec *abstime) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- err = -errno;
- }
- return err;
- }
-
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- err = -errno;
- }
- return err;
- }
-
- // FUTEX_WAKE_BITSET
- static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- err = -errno;
- }
- return err;
- }
-};
-
-class Futex : public FutexImpl {};
-
-} // namespace synchronization_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+class FutexImpl {
+ public:
+ static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ KernelTimeout t) {
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ }
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
+ int32_t bits,
+ const struct timespec *abstime) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int Wake(std::atomic<int32_t> *v, int32_t count) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ // FUTEX_WAKE_BITSET
+ static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+};
+
+class Futex : public FutexImpl {};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
#endif // ABSL_INTERNAL_HAVE_FUTEX
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
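
For context only (not part of this commit): FutexImpl above funnels blocking waits and wakeups through the Linux futex syscall, with FUTEX_WAIT re-checking the word atomically before sleeping. A Linux-only sketch of that raw wait/wake pattern, using hypothetical helper names rather than the internal Abseil API:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #include <atomic>
    #include <cstdint>

    // Sleep only while *addr still equals `expected`; spurious wakeups are
    // possible, so callers re-check their condition in a loop.
    void FutexWait(std::atomic<int32_t>* addr, int32_t expected) {
      syscall(SYS_futex, reinterpret_cast<int32_t*>(addr),
              FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
    }

    // Wake up to `count` threads currently blocked on *addr.
    void FutexWake(std::atomic<int32_t>* addr, int32_t count) {
      syscall(SYS_futex, reinterpret_cast<int32_t*>(addr),
              FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
    }

    // Typical use: a waiter loops `while (word.load() == 0) FutexWait(&word, 0);`
    // while a producer does `word.store(1); FutexWake(&word, 1);`.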
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index 27fec21681..a6dab02da1 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -45,16 +45,16 @@
// Do not use STL. This module does not use standard memory allocation.
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
namespace {
// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
// which people are doing things like acquiring Mutexes.
-ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
- absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
+ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
static void InitArenaIfNecessary() {
arena_mu.Lock();
@@ -692,7 +692,7 @@ int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
}
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
index ceba33e4de..7a723ac04f 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
@@ -40,10 +40,10 @@
#include <cstdint>
-#include "absl/base/config.h"
-
+#include "absl/base/config.h"
+
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// Opaque identifier for a graph node.
@@ -135,7 +135,7 @@ class GraphCycles {
};
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
index bbd4d2d70f..338aaf512b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
@@ -26,7 +26,7 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#include <time.h>
-
+
#include <algorithm>
#include <limits>
@@ -35,7 +35,7 @@
#include "absl/time/time.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
class Futex;
@@ -58,10 +58,10 @@ class KernelTimeout {
bool has_timeout() const { return ns_ != 0; }
- // Convert to parameter for sem_timedwait/futex/similar. Only for approved
- // users. Do not call if !has_timeout.
- struct timespec MakeAbsTimespec();
-
+ // Convert to parameter for sem_timedwait/futex/similar. Only for approved
+ // users. Do not call if !has_timeout.
+ struct timespec MakeAbsTimespec();
+
private:
// internal rep, not user visible: ns after unix epoch.
// zero = no timeout.
@@ -125,32 +125,32 @@ class KernelTimeout {
friend class Waiter;
};
-inline struct timespec KernelTimeout::MakeAbsTimespec() {
- int64_t n = ns_;
- static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
- if (n == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timespec from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- n = (std::numeric_limits<int64_t>::max)();
- }
-
- // Kernel APIs validate timespecs as being at or after the epoch,
- // despite the kernel time type being signed. However, no one can
- // tell the difference between a timeout at or before the epoch (since
- // all such timeouts have expired!)
- if (n < 0) n = 0;
-
- struct timespec abstime;
- int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
- abstime.tv_sec = static_cast<time_t>(seconds);
- abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
- return abstime;
-}
-
+inline struct timespec KernelTimeout::MakeAbsTimespec() {
+ int64_t n = ns_;
+ static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+ if (n == 0) {
+ ABSL_RAW_LOG(
+ ERROR, "Tried to create a timespec from a non-timeout; never do this.");
+ // But we'll try to continue sanely. no-timeout ~= saturated timeout.
+ n = (std::numeric_limits<int64_t>::max)();
+ }
+
+ // Kernel APIs validate timespecs as being at or after the epoch,
+ // despite the kernel time type being signed. However, no one can
+ // tell the difference between a timeout at or before the epoch (since
+ // all such timeouts have expired!)
+ if (n < 0) n = 0;
+
+ struct timespec abstime;
+ int64_t seconds = (std::min)(n / kNanosPerSecond,
+ int64_t{(std::numeric_limits<time_t>::max)()});
+ abstime.tv_sec = static_cast<time_t>(seconds);
+ abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+ return abstime;
+}
+
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
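
For context only (not part of this commit): MakeAbsTimespec above converts the timeout's absolute nanoseconds-since-epoch value into a struct timespec, clamping pre-epoch values to zero and saturating the seconds field at the largest representable time_t. A standalone sketch of that arithmetic, with a hypothetical function name:

    #include <time.h>

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    struct timespec AbsNanosToTimespec(int64_t ns_after_epoch) {
      constexpr int64_t kNanosPerSecond = 1000 * 1000 * 1000;
      // Timeouts at or before the epoch have already expired; clamp to the epoch.
      if (ns_after_epoch < 0) ns_after_epoch = 0;
      struct timespec abstime;
      // Saturate the seconds field rather than overflowing time_t.
      int64_t seconds =
          (std::min)(ns_after_epoch / kNanosPerSecond,
                     int64_t{(std::numeric_limits<time_t>::max)()});
      abstime.tv_sec = static_cast<time_t>(seconds);
      abstime.tv_nsec =
          static_cast<decltype(abstime.tv_nsec)>(ns_after_epoch % kNanosPerSecond);
      return abstime;
    }

    // e.g. AbsNanosToTimespec(absl::ToUnixNanos(absl::Now() + absl::Milliseconds(500)))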
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
index a6031787e0..40e232de25 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
@@ -25,7 +25,7 @@
#include "absl/synchronization/internal/waiter.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
@@ -63,7 +63,7 @@ void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
}
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
extern "C" {
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
index 7beae8ef1d..c94a16f82b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
@@ -32,7 +32,7 @@
#include "absl/synchronization/internal/kernel_timeout.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
class Mutex;
@@ -78,7 +78,7 @@ class PerThreadSem {
// !t.has_timeout() => Wait(t) will return true.
static inline bool Wait(KernelTimeout t);
- // Permitted callers.
+ // Permitted callers.
friend class PerThreadSemTest;
friend class absl::Mutex;
friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
@@ -86,7 +86,7 @@ class PerThreadSem {
};
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
// In some build configurations we pass --detect-odr-violations to the
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
index 0cb96dacde..762d74ffce 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
@@ -26,7 +26,7 @@
#include "absl/synchronization/mutex.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// A simple ThreadPool implementation for tests.
@@ -87,7 +87,7 @@ class ThreadPool {
};
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
index 28ef311e4a..9bee8284da 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
@@ -48,9 +48,9 @@
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
-
+
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
static void MaybeBecomeIdle() {
@@ -82,7 +82,7 @@ bool Waiter::Wait(KernelTimeout t) {
while (true) {
int32_t x = futex_.load(std::memory_order_relaxed);
- while (x != 0) {
+ while (x != 0) {
if (!futex_.compare_exchange_weak(x, x - 1,
std::memory_order_acquire,
std::memory_order_relaxed)) {
@@ -258,7 +258,7 @@ bool Waiter::Wait(KernelTimeout t) {
bool first_pass = true;
while (true) {
int x = wakeups_.load(std::memory_order_relaxed);
- while (x != 0) {
+ while (x != 0) {
if (!wakeups_.compare_exchange_weak(x, x - 1,
std::memory_order_acquire,
std::memory_order_relaxed)) {
@@ -312,29 +312,29 @@ class Waiter::WinHelper {
return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
}
- static_assert(sizeof(SRWLOCK) == sizeof(void *),
- "`mu_storage_` does not have the same size as SRWLOCK");
- static_assert(alignof(SRWLOCK) == alignof(void *),
- "`mu_storage_` does not have the same alignment as SRWLOCK");
-
- static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
- "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
- "as `CONDITION_VARIABLE`");
+ static_assert(sizeof(SRWLOCK) == sizeof(void *),
+ "`mu_storage_` does not have the same size as SRWLOCK");
+ static_assert(alignof(SRWLOCK) == alignof(void *),
+ "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+ static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+ "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
+ "as `CONDITION_VARIABLE`");
static_assert(
- alignof(CONDITION_VARIABLE) == alignof(void *),
- "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+ alignof(CONDITION_VARIABLE) == alignof(void *),
+ "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
// The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
// and destructible because we never call their constructors or destructors.
static_assert(std::is_trivially_constructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially constructible");
- static_assert(
- std::is_trivially_constructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially constructible");
+ "The `SRWLOCK` type must be trivially constructible");
+ static_assert(
+ std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially constructible");
static_assert(std::is_trivially_destructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially destructible");
+ "The `SRWLOCK` type must be trivially destructible");
static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially destructible");
+ "The `CONDITION_VARIABLE` type must be trivially destructible");
};
class LockHolder {
@@ -424,5 +424,5 @@ void Waiter::InternalCondVarPoke() {
#endif
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
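
For context only (not part of this commit): in both the futex and the condition-variable paths above, Waiter::Wait consumes a wakeup by decrementing an atomic counter only while it is non-zero, retrying when compare_exchange_weak fails. A generic sketch of that claim loop, with hypothetical names:

    #include <atomic>
    #include <cstdint>

    // Returns true if a pending wakeup token was consumed; false if none were
    // available (the caller would then block and retry after being woken).
    bool TryClaimWakeup(std::atomic<int32_t>* wakeups) {
      int32_t x = wakeups->load(std::memory_order_relaxed);
      while (x != 0) {
        if (wakeups->compare_exchange_weak(x, x - 1,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
          return true;  // consumed exactly one token
        }
        // On failure compare_exchange_weak reloads `x`; loop and retry.
      }
      return false;
    }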
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
index be3df180d4..e3415fb11d 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
@@ -58,7 +58,7 @@
#endif
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// Waiter is an OS-specific semaphore.
@@ -96,8 +96,8 @@ class Waiter {
}
// How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
- static constexpr int kIdlePeriods = 60;
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
#else
// Memory consumption under ThreadSanitizer is a serious concern,
// so we release resources sooner. The value of 1 leads to 1 to 2 second
@@ -136,10 +136,10 @@ class Waiter {
// REQUIRES: WinHelper::GetLock(this) must be held.
void InternalCondVarPoke();
- // We can't include Windows.h in our headers, so we use aligned charachter
- // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
- alignas(void*) unsigned char mu_storage_[sizeof(void*)];
- alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+ // We can't include Windows.h in our headers, so we use aligned charachter
+ // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+ alignas(void*) unsigned char cv_storage_[sizeof(void*)];
int waiter_count_;
int wakeup_count_;
@@ -149,7 +149,7 @@ class Waiter {
};
} // namespace synchronization_internal
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
index 40f72cf665..6207943f1a 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
@@ -24,10 +24,10 @@ NO_COMPILER_WARNINGS()
NO_UTIL()
-CFLAGS(
- -DNOMINMAX
-)
-
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCS(
graphcycles.cc
)
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
index 76ad41fe16..a429496e9b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
@@ -39,7 +39,7 @@
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
-#include "absl/base/call_once.h"
+#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
@@ -60,7 +60,7 @@
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::PerThreadSynch;
-using absl::base_internal::SchedulingGuard;
+using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
@@ -76,11 +76,11 @@ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
} // extern "C"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
namespace {
-#if defined(ABSL_HAVE_THREAD_SANITIZER)
+#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
@@ -90,16 +90,16 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
- const char *msg, const void *obj, int64_t wait_cycles)>
- mutex_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
- cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
+ const char *msg, const void *obj, int64_t wait_cycles)>
+ mutex_tracer;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
+ cond_var_tracer;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
bool (*)(const void *pc, char *out, int out_size)>
symbolizer(absl::Symbolize);
@@ -131,15 +131,15 @@ namespace {
// See the comment in GetMutexGlobals() for more information.
enum DelayMode { AGGRESSIVE, GENTLE };
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
- absl::once_flag once;
- int spinloop_iterations = 0;
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+ absl::once_flag once;
+ int spinloop_iterations = 0;
int32_t mutex_sleep_limit[2] = {};
-};
-
+};
+
const MutexGlobals &GetMutexGlobals() {
- ABSL_CONST_INIT static MutexGlobals data;
- absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+ ABSL_CONST_INIT static MutexGlobals data;
+ absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
const int num_cpus = absl::base_internal::NumCPUs();
data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
// If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
@@ -154,36 +154,36 @@ const MutexGlobals &GetMutexGlobals() {
data.mutex_sleep_limit[AGGRESSIVE] = 0;
data.mutex_sleep_limit[GENTLE] = 0;
}
- });
- return data;
-}
+ });
+ return data;
+}
} // namespace
-
-namespace synchronization_internal {
+
+namespace synchronization_internal {
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
// The returned value should be used as `c` for the next call to `MutexDelay`.
-int MutexDelay(int32_t c, int mode) {
+int MutexDelay(int32_t c, int mode) {
const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
- // Spin.
- c++;
+ // Spin.
+ c++;
} else {
- SchedulingGuard::ScopedEnable enable_rescheduling;
+ SchedulingGuard::ScopedEnable enable_rescheduling;
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) {
- // Yield once.
+ if (c == limit) {
+ // Yield once.
ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
- } else {
- // Then wait.
+ } else {
+ // Then wait.
absl::SleepFor(absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return c;
+ return c;
}
-} // namespace synchronization_internal
+} // namespace synchronization_internal
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
@@ -221,12 +221,12 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
//------------------------------------------------------------------
// Data for doing deadlock detection.
-ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
- absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
- ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
+// Graph used to detect deadlocks.
+ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
// An event mechanism for debugging mutex use.
@@ -287,12 +287,12 @@ static const struct {
{0, "SignalAll on "},
};
-ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
- absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
-static constexpr uint32_t kNSynchEvent = 1031;
+static constexpr uint32_t kNSynchEvent = 1031;
static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
@@ -312,7 +312,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
+ char name[1]; // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
@@ -503,7 +503,7 @@ struct SynchWaitParams {
std::atomic<intptr_t> *cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
- // to contend for the mutex.
+ // to contend for the mutex.
};
struct SynchLocksHeld {
@@ -1064,7 +1064,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1131,7 +1131,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
this->TryRemove(s);
int c = 0;
while (s->next != nullptr) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
this->TryRemove(s);
}
if (kDebugMode) {
@@ -1452,19 +1452,19 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = GetMutexGlobals().spinloop_iterations;
+ int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader|kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- return true;
+ return true;
}
- } while (--c > 0);
- return false;
+ } while (--c > 0);
+ return false;
}
ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
@@ -1763,8 +1763,8 @@ static const intptr_t ignore_waiting_writers[] = {
};
// Internal version of LockWhen(). See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
- int flags) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
"condition untrue on return from LockSlow");
@@ -1829,9 +1829,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// So we "divert" (which un-ignores both memory accesses and synchronization)
// and then separately turn on ignores of memory accesses.
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
return res;
@@ -1912,7 +1912,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2014,8 +2014,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
- // delay, then try again
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // delay, then try again
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -2032,8 +2032,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2310,8 +2310,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.store(nv, std::memory_order_release);
break; // out of for(;;)-loop
}
- // aggressive here; no one can proceed till we do
- c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
+ // aggressive here; no one can proceed till we do
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
@@ -2323,8 +2323,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (!cond_waiter) {
// Sample lock contention events only if the (first) waiter was trying to
// acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles =
- base_internal::CycleClock::Now() - enqueue_timestamp;
+ int64_t wait_cycles =
+ base_internal::CycleClock::Now() - enqueue_timestamp;
mutex_tracer("slow release", this, wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(enqueue_timestamp);
@@ -2351,7 +2351,7 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2401,7 +2401,7 @@ void Mutex::Fer(PerThreadSynch *w) {
return;
}
}
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
@@ -2450,7 +2450,7 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2479,8 +2479,8 @@ void CondVar::Remove(PerThreadSynch *s) {
std::memory_order_release);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
}
@@ -2513,7 +2513,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
!cv_word->compare_exchange_weak(v, v | kCvSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
@@ -2612,7 +2612,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2645,7 +2645,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2682,8 +2682,8 @@ void CondVar::SignalAll () {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2696,7 +2696,7 @@ void ReleasableMutexLock::Release() {
this->mu_ = nullptr;
}
-#ifdef ABSL_HAVE_THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
@@ -2747,5 +2747,5 @@ bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
a->arg_ == b->arg_ && a->method_ == b->method_;
}
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
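
For context only (not part of this commit): MutexDelay above backs off in three stages — spin for a bounded number of iterations, yield the CPU once at the limit, then sleep about 10 microseconds and reset. A portable sketch of the same shape using the standard library instead of Abseil's internal scheduling guards (names are hypothetical):

    #include <chrono>
    #include <cstdint>
    #include <thread>

    // `c` is the caller's retry counter; feed the return value back in on the
    // next attempt, as Mutex::LockSlowLoop does with MutexDelay.
    int32_t BackoffDelay(int32_t c, int32_t spin_limit) {
      if (c < spin_limit) {
        return c + 1;                // stage 1: keep spinning
      }
      if (c == spin_limit) {
        std::this_thread::yield();   // stage 2: yield once at the limit
        return c + 1;
      }
      std::this_thread::sleep_for(std::chrono::microseconds(10));  // stage 3: sleep
      return 0;                      // restart the cycle
    }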
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
index 38338f24df..111eea4c77 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
@@ -74,7 +74,7 @@
#include "absl/time/time.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
class Condition;
struct SynchWaitParams;
@@ -323,16 +323,16 @@ class ABSL_LOCKABLE Mutex {
// Mutex::AwaitWithTimeout()
// Mutex::AwaitWithDeadline()
//
- // Unlocks this `Mutex` and blocks until simultaneously:
+ // Unlocks this `Mutex` and blocks until simultaneously:
// - either `cond` is true or the {timeout has expired, deadline has passed}
// and
// - this `Mutex` can be reacquired,
// then reacquire this `Mutex` in the same mode in which it was previously
// held, returning `true` iff `cond` is `true` on return.
//
- // If the condition is initially `true`, the implementation *may* skip the
- // release/re-acquire step and return immediately.
- //
+ // If the condition is initially `true`, the implementation *may* skip the
+ // release/re-acquire step and return immediately.
+ //
// Deadlines in the past are equivalent to an immediate deadline.
// Negative timeouts are equivalent to a zero timeout.
//
@@ -701,11 +701,11 @@ class Condition {
// return processed_ >= current;
// };
// mu_.Await(Condition(&reached));
- //
- // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
- // the lambda as it may be called when the mutex is being unlocked from a
- // scope holding only a reader lock, which will make the assertion not
- // fulfilled and crash the binary.
+ //
+ // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+ // the lambda as it may be called when the mutex is being unlocked from a
+ // scope holding only a reader lock, which will make the assertion not
+ // fulfilled and crash the binary.
// See class comment for performance advice. In particular, if there
// might be more than one waiter for the same condition, make sure
@@ -790,8 +790,8 @@ class Condition {
//
class CondVar {
public:
- // A `CondVar` allocated on the heap or on the stack can use the this
- // constructor.
+ // A `CondVar` allocated on the heap or on the stack can use the this
+ // constructor.
CondVar();
~CondVar();
@@ -1005,7 +1005,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
//
// This has the same memory ordering concerns as RegisterMutexProfiler() above.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
- int64_t wait_cycles));
+ int64_t wait_cycles));
// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
// into a single interface, since they are only ever called in pairs.
@@ -1027,7 +1027,7 @@ void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
//
// 'pc' is the program counter being symbolized, 'out' is the buffer to write
// into, and 'out_size' is the size of the buffer. This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
+// false if symbolizing failed, or true if a NUL-terminated symbol was written
// to 'out.'
//
// This has the same memory ordering concerns as RegisterMutexProfiler() above.
@@ -1066,7 +1066,7 @@ enum class OnDeadlockCycle {
// the manner chosen here.
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
// In some build configurations we pass --detect-odr-violations to the
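
For context only (not part of this commit): the Await/LockWhen family documented above releases the mutex while a Condition is false and returns with the mutex reacquired once it is true; no explicit signalling is needed because the Mutex re-evaluates registered conditions when it is unlocked. A minimal usage sketch following the lambda pattern from the header comment (placeholder types):

    #include <deque>

    #include "absl/synchronization/mutex.h"

    class Queue {
     public:
      void Push(int v) {
        absl::MutexLock l(&mu_);
        items_.push_back(v);  // waiters are re-evaluated when mu_ is released
      }

      int BlockingPop() {
        absl::MutexLock l(&mu_);
        auto nonempty = [this] {
          mu_.AssertReaderHeld();  // per the NOTE above, not AssertHeld()
          return !items_.empty();
        };
        mu_.Await(absl::Condition(&nonempty));  // releases mu_ while waiting
        int v = items_.front();
        items_.pop_front();
        return v;
      }

     private:
      absl::Mutex mu_;
      std::deque<int> items_ ABSL_GUARDED_BY(mu_);
    };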
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc b/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
index e91b903822..1439d41380 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
@@ -22,7 +22,7 @@
#include "absl/time/time.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
void Notification::Notify() {
MutexLock l(&this->mutex_);
@@ -74,5 +74,5 @@ bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
return notified;
}
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
index 9a354ca2c0..ebf0105223 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
@@ -57,7 +57,7 @@
#include "absl/time/time.h"
namespace absl {
-ABSL_NAMESPACE_BEGIN
+ABSL_NAMESPACE_BEGIN
// -----------------------------------------------------------------------------
// Notification
@@ -117,7 +117,7 @@ class Notification {
std::atomic<bool> notified_yet_; // written under mutex_
};
-ABSL_NAMESPACE_END
+ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
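
For context only (not part of this commit): a Notification is a one-shot event — Notify() may be called at most once, and any number of threads may block in WaitForNotification(). A minimal usage sketch:

    #include <thread>

    #include "absl/synchronization/notification.h"

    int main() {
      absl::Notification ready;
      std::thread worker([&ready] {
        ready.WaitForNotification();  // blocks until Notify() has been called
        // ... safe to use whatever `ready` guards from here on ...
      });
      // ... perform one-time initialization ...
      ready.Notify();  // must be called at most once
      worker.join();
    }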
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/ya.make b/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
index 06f72b69e9..91926a20aa 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
@@ -36,10 +36,10 @@ NO_COMPILER_WARNINGS()
NO_UTIL()
-CFLAGS(
- -DNOMINMAX
-)
-
+CFLAGS(
+ -DNOMINMAX
+)
+
SRCS(
barrier.cc
blocking_counter.cc