path: root/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
author    heretic <heretic@yandex-team.ru>  2022-02-10 16:45:43 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:43 +0300
commit    397cbe258b9e064f49c4ca575279f02f39fef76e (patch)
tree      a0b0eb3cca6a14e4e8ea715393637672fa651284 /contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
parent    43f5a35593ebc9f6bcea619bb170394ea7ae468e (diff)
download  ydb-397cbe258b9e064f49c4ca575279f02f39fef76e.tar.gz
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc')
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc  272
1 file changed, 136 insertions(+), 136 deletions(-)
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
index babe5c4ada..75f4bbfae2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
@@ -39,7 +39,7 @@
#include <thread> // NOLINT(build/c++11)
#include "y_absl/base/attributes.h"
-#include "y_absl/base/call_once.h"
+#include "y_absl/base/call_once.h"
#include "y_absl/base/config.h"
#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/internal/atomic_hook.h"
@@ -50,7 +50,7 @@
#include "y_absl/base/internal/spinlock.h"
#include "y_absl/base/internal/sysinfo.h"
#include "y_absl/base/internal/thread_identity.h"
-#include "y_absl/base/internal/tsan_mutex_interface.h"
+#include "y_absl/base/internal/tsan_mutex_interface.h"
#include "y_absl/base/port.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/debugging/symbolize.h"
@@ -60,7 +60,7 @@
using y_absl::base_internal::CurrentThreadIdentityIfPresent;
using y_absl::base_internal::PerThreadSynch;
-using y_absl::base_internal::SchedulingGuard;
+using y_absl::base_internal::SchedulingGuard;
using y_absl::base_internal::ThreadIdentity;
using y_absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using y_absl::synchronization_internal::GraphCycles;
@@ -70,9 +70,9 @@ using y_absl::synchronization_internal::KernelTimeout;
using y_absl::synchronization_internal::PerThreadSem;
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
- std::this_thread::yield();
-}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+ std::this_thread::yield();
+}
} // extern "C"
namespace y_absl {
@@ -80,7 +80,7 @@ ABSL_NAMESPACE_BEGIN
namespace {
-#if defined(ABSL_HAVE_THREAD_SANITIZER)
+#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
@@ -91,8 +91,8 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
- submit_profile_data;
+y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)>
mutex_tracer;
@@ -127,63 +127,63 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
}
namespace {
-// Represents the strategy for spin and yield.
-// See the comment in GetMutexGlobals() for more information.
-enum DelayMode { AGGRESSIVE, GENTLE };
-
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
- y_absl::once_flag once;
- int spinloop_iterations = 0;
- int32_t mutex_sleep_limit[2] = {};
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
+
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+ y_absl::once_flag once;
+ int spinloop_iterations = 0;
+ int32_t mutex_sleep_limit[2] = {};
};
-
-const MutexGlobals &GetMutexGlobals() {
- ABSL_CONST_INIT static MutexGlobals data;
- y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = y_absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
- data.mutex_sleep_limit[AGGRESSIVE] = 5000;
- data.mutex_sleep_limit[GENTLE] = 250;
- } else {
- data.mutex_sleep_limit[AGGRESSIVE] = 0;
- data.mutex_sleep_limit[GENTLE] = 0;
- }
- });
- return data;
-}
-} // namespace
-
-namespace synchronization_internal {
-// Returns the Mutex delay on iteration `c` depending on the given `mode`.
-// The returned value should be used as `c` for the next call to `MutexDelay`.
-int MutexDelay(int32_t c, int mode) {
- const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+
+const MutexGlobals &GetMutexGlobals() {
+ ABSL_CONST_INIT static MutexGlobals data;
+ y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+ const int num_cpus = y_absl::base_internal::NumCPUs();
+ data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+ // for any thread to make progress gets it without undue delay.
+ if (num_cpus > 1) {
+ data.mutex_sleep_limit[AGGRESSIVE] = 5000;
+ data.mutex_sleep_limit[GENTLE] = 250;
+ } else {
+ data.mutex_sleep_limit[AGGRESSIVE] = 0;
+ data.mutex_sleep_limit[GENTLE] = 0;
+ }
+ });
+ return data;
+}
+} // namespace
+
+namespace synchronization_internal {
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
+int MutexDelay(int32_t c, int mode) {
+ const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
- // Spin.
- c++;
+ // Spin.
+ c++;
} else {
- SchedulingGuard::ScopedEnable enable_rescheduling;
+ SchedulingGuard::ScopedEnable enable_rescheduling;
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) {
- // Yield once.
- ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+ if (c == limit) {
+ // Yield once.
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
- } else {
- // Then wait.
+ } else {
+ // Then wait.
y_absl::SleepFor(y_absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return c;
+ return c;
}
-} // namespace synchronization_internal
+} // namespace synchronization_internal
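The MutexDelay restored above implements a three-stage backoff: spin while the iteration counter is below the per-mode limit, yield exactly once when the counter hits the limit, then sleep in 10-microsecond slices and reset. A minimal standalone sketch of that policy, using std::this_thread in place of the y_absl yield hook and hypothetical kSleepLimit values standing in for GetMutexGlobals().mutex_sleep_limit (TSan annotations and SchedulingGuard omitted):

    #include <chrono>
    #include <cstdint>
    #include <thread>

    enum DelayMode { AGGRESSIVE = 0, GENTLE = 1 };
    // Hypothetical stand-ins for GetMutexGlobals().mutex_sleep_limit[mode]
    // on a multiprocessor (5000 aggressive, 250 gentle, per the hunk above).
    constexpr int32_t kSleepLimit[2] = {5000, 250};

    // Returns the next iteration counter; mirrors the spin/yield/sleep
    // staging of MutexDelay.
    int32_t Backoff(int32_t c, DelayMode mode) {
      const int32_t limit = kSleepLimit[mode];
      if (c < limit) {
        return c + 1;  // spin
      }
      if (c == limit) {
        std::this_thread::yield();  // yield once at the boundary
        return c + 1;
      }
      std::this_thread::sleep_for(std::chrono::microseconds(10));  // then wait
      return 0;  // restart the spin/yield/sleep cycle
    }

Callers feed the return value back in as the next c, exactly as the LockSlowLoop and CondVar paths below do with synchronization_internal::MutexDelay.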
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
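The helper this comment introduces is a bit-setting CAS loop whose body is unchanged by the patch and so not shown here. A hedged sketch of the shape the comment describes (the real AtomicSetBits also takes a wait_until_clear mask that this sketch omits):

    #include <atomic>
    #include <cstdint>

    // Atomically ensure (*pv & bits) == bits: retry the CAS until either
    // we set the bits or another thread has already set them.
    void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits) {
      intptr_t v = pv->load(std::memory_order_relaxed);
      while ((v & bits) != bits &&
             !pv->compare_exchange_weak(v, v | bits,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
        // compare_exchange_weak reloads v on failure; just retry.
      }
    }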
@@ -221,12 +221,12 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
//------------------------------------------------------------------
// Data for doing deadlock detection.
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock deadlock_graph_mu(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock deadlock_graph_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
- ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
+// Graph used to detect deadlocks.
+ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
// An event mechanism for debugging mutex use.
@@ -287,12 +287,12 @@ static const struct {
{0, "SignalAll on "},
};
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
-static constexpr uint32_t kNSynchEvent = 1031;
+static constexpr uint32_t kNSynchEvent = 1031;
static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
@@ -312,7 +312,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
+ char name[1]; // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
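synch_event above is a fixed-size chained hash table keyed by object address, sized with a prime bucket count and storing the event name inline past the struct (the char name[1] flexible-array idiom). A toy sketch of the same bucket scheme, simplified to std::string names and with the refcounting and synch_event_mu locking left out; the names Event, g_buckets and GetEvent are inventions of this sketch:

    #include <cstdint>
    #include <string>

    constexpr uint32_t kNBuckets = 1031;  // prime > 2, as the comment requires

    struct Event {
      Event* next;     // hash-bucket chain
      uintptr_t addr;  // address of the synchronization object
      std::string name;
    };

    Event* g_buckets[kNBuckets] = {};

    // Find or create the event record for addr (single-threaded toy; the
    // real table guards the buckets with synch_event_mu and refcounts).
    Event* GetEvent(const void* addr, const char* name) {
      const uintptr_t key = reinterpret_cast<uintptr_t>(addr);
      Event** head = &g_buckets[key % kNBuckets];
      for (Event* e = *head; e != nullptr; e = e->next) {
        if (e->addr == key) return e;
      }
      Event* e = new Event{*head, key, name};
      *head = e;
      return e;
    }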
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
@@ -503,7 +503,7 @@ struct SynchWaitParams {
std::atomic<intptr_t> *cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
- // to contend for the mutex.
+ // to contend for the mutex.
};
struct SynchLocksHeld {
@@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -717,7 +717,7 @@ static constexpr bool kDebugMode = false;
static constexpr bool kDebugMode = true;
#endif
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
static unsigned TsanFlags(Mutex::MuHow how) {
return how == kShared ? __tsan_mutex_read_lock : 0;
}
@@ -763,13 +763,13 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
-// Return true iff threads x and y are part of the same equivalence
-// class of waiters. An equivalence class is defined as the set of
-// waiters with the same condition, type of lock, and thread priority.
-//
-// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
- return x->waitp->how == y->waitp->how && x->priority == y->priority &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
@@ -788,19 +788,19 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// - invalid (iff x is not in a Mutex wait queue),
// - null, or
// - a pointer to a distinct thread waiting later in the same Mutex queue
-// such that all threads in [x, x->skip] have the same condition, priority
-// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
-// x->skip]).
+// such that all threads in [x, x->skip] have the same condition, priority
+// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+// x->skip]).
// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
//
-// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
// first runnable thread y from the front a Mutex queue to adjust the skip
// field of another thread x because if x->skip==y, x->skip must (have) become
// invalid before y is removed. The function TryRemove can remove a specified
// thread from an arbitrary position in the queue whether runnable or not, so
// it fixes up skip fields that would otherwise be left dangling.
// The statement
-// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
+// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
// maintains the invariant provided x is not the last waiter in a Mutex queue
// The statement
// if (x->skip != null) { x->skip = x->skip->skip; }
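To make the skip-field invariant concrete: a skip chain lets queue walkers jump over a run of equivalent waiters in one hop, and the x->skip = x->skip->skip statement above compacts chains much like path compression in a union-find. A simplified stand-in for the Skip() helper that Enqueue and TryRemove rely on, with PerThreadSynch reduced to a two-pointer Waiter node:

    struct Waiter {
      Waiter* next = nullptr;
      Waiter* skip = nullptr;  // valid => all of [this, skip] are equivalent
    };

    // Follow x's skip chain to its last node, short-circuiting the links
    // passed over so later walks are cheaper.
    Waiter* Skip(Waiter* x) {
      Waiter* prev = nullptr;
      Waiter* cur = x;
      Waiter* nxt = x->skip;
      if (nxt != nullptr) {
        while ((prev = cur, cur = nxt, nxt = nxt->skip) != nullptr) {
          prev->skip = nxt;  // compress: prev can now jump one run farther
        }
        x->skip = cur;  // x jumps straight to the end of its run
      }
      return cur;
    }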
@@ -934,17 +934,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
- // No unlocker can be scanning the queue, so we can insert into the
- // middle of the queue.
- //
- // Within a skip chain, all waiters have the same priority, so we can
- // skip forward through the chains until we find one with a lower
- // priority than the waiter to be enqueued.
+ // No unlocker can be scanning the queue, so we can insert into the
+ // middle of the queue.
+ //
+ // Within a skip chain, all waiters have the same priority, so we can
+ // skip forward through the chains until we find one with a lower
+ // priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
- // (side-effect: optimizes skip chain)
- advance_to = Skip(enqueue_after->next);
+ // (side-effect: optimizes skip chain)
+ advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
// termination guaranteed because s->priority > head->priority
// and head is the end of a skip chain
@@ -963,21 +963,21 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// enqueue_after can be: head, Skip(...), or cur.
// The first two imply enqueue_after->skip == nullptr, and
- // the last is used only if MuEquivalentWaiter(s, cur).
+ // the last is used only if MuEquivalentWaiter(s, cur).
// We require this because clearing enqueue_after->skip
// is impossible; enqueue_after's predecessors might also
// incorrectly skip over s if we were to allow other
// insertion points.
- ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
- MuEquivalentWaiter(enqueue_after, s),
- "Mutex Enqueue failure");
+ ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+ MuEquivalentWaiter(enqueue_after, s),
+ "Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
- MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
+ MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
// enqueue_after can skip to its new successor, s
enqueue_after->skip = enqueue_after->next;
}
- if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
} else { // enqueue not done any other way, so
@@ -987,7 +987,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
- if (head->may_skip && MuEquivalentWaiter(head, s)) {
+ if (head->may_skip && MuEquivalentWaiter(head, s)) {
// head now has successor; may skip
head->skip = s;
}
@@ -1007,7 +1007,7 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
pw->next = w->next; // snip w out of list
if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
- } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
+ } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successors skip target
@@ -1064,7 +1064,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1077,13 +1077,13 @@ void Mutex::TryRemove(PerThreadSynch *s) {
PerThreadSynch *w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
- // If the current element isn't equivalent to the waiter to be
- // removed, we can skip the entire chain.
- if (!MuEquivalentWaiter(s, w)) {
+ // If the current element isn't equivalent to the waiter to be
+ // removed, we can skip the entire chain.
+ if (!MuEquivalentWaiter(s, w)) {
pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
- // because they are in a different equivalence class.
+ // because they are in a different equivalence class.
} else { // seeking same condition
FixSkip(w, s); // fix up any skip pointer from w to s
pw = w;
@@ -1131,7 +1131,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
this->TryRemove(s);
int c = 0;
while (s->next != nullptr) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
this->TryRemove(s);
}
if (kDebugMode) {
@@ -1374,9 +1374,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
len += static_cast<int>(strlen(&b->buf[len]));
}
}
- ABSL_RAW_LOG(ERROR,
- "Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
- "historical lock ordering graph has been observed",
+ ABSL_RAW_LOG(ERROR,
+ "Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
+ "historical lock ordering graph has been observed",
static_cast<void *>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
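For context on what this error path reports: each acquisition records "held -> acquired" edges in a global lock-ordering graph, and a cycle in that graph is evidence of a potential deadlock. GraphCycles maintains cycle-freedom incrementally; the toy sketch below recomputes reachability with a DFS instead, and every name in it is an invention of the sketch:

    #include <map>
    #include <set>

    // Toy lock-ordering graph keyed by Mutex address. An edge a -> b is
    // recorded when a thread acquires b while already holding a.
    class LockOrderGraph {
     public:
      void AddEdge(const void* held, const void* acquired) {
        edges_[held].insert(acquired);
      }
      // True if a cycle is reachable from start (back edge found by DFS).
      bool HasCycleFrom(const void* start) const {
        std::set<const void*> on_path, done;
        return Dfs(start, &on_path, &done);
      }

     private:
      bool Dfs(const void* n, std::set<const void*>* on_path,
               std::set<const void*>* done) const {
        if (on_path->count(n)) return true;  // back edge => cycle
        if (done->count(n)) return false;    // already proven acyclic
        on_path->insert(n);
        auto it = edges_.find(n);
        if (it != edges_.end()) {
          for (const void* m : it->second) {
            if (Dfs(m, on_path, done)) return true;
          }
        }
        on_path->erase(n);
        done->insert(n);
        return false;
      }
      std::map<const void*, std::set<const void*>> edges_;
    };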
@@ -1452,19 +1452,19 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = GetMutexGlobals().spinloop_iterations;
+ int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader|kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- return true;
+ return true;
}
- } while (--c > 0);
- return false;
+ } while (--c > 0);
+ return false;
}
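TryAcquireWithSpinning above bounds its optimistic loop with spinloop_iterations (1500 on multiprocessors, 0 on uniprocessors, per GetMutexGlobals()), bailing out to the slow path whenever a reader holds the word or tracing is active. The same acquire-or-give-up pattern on a bare atomic word, with hypothetical kWriter/kReader/kEvent masks standing in for the kMu* constants:

    #include <atomic>
    #include <cstdint>

    constexpr intptr_t kWriter = 0x1, kReader = 0x2, kEvent = 0x4;  // stand-ins

    // Spin up to iters times trying to CAS the writer bit in; readers and
    // tracing are handed to the slow path unconditionally.
    bool TryAcquireSpinning(std::atomic<intptr_t>* mu, int iters) {
      int c = iters;
      do {
        intptr_t v = mu->load(std::memory_order_relaxed);
        if ((v & (kReader | kEvent)) != 0) {
          return false;  // a reader or tracing -> give up
        }
        if ((v & kWriter) == 0 &&  // no holder -> try to acquire
            mu->compare_exchange_strong(v, v | kWriter,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
          return true;
        }
      } while (--c > 0);
      return false;
    }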
ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
@@ -1763,8 +1763,8 @@ static const intptr_t ignore_waiting_writers[] = {
};
// Internal version of LockWhen(). See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
- int flags) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
"condition untrue on return from LockSlow");
@@ -1779,7 +1779,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// All memory accesses are ignored inside of mutex operations + for unlock
// operation tsan considers that we've already released the mutex.
bool res = false;
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
const int flags = read_lock ? __tsan_mutex_read_lock : 0;
const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
@@ -1829,9 +1829,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// So we "divert" (which un-ignores both memory accesses and synchronization)
// and then separately turn on ignores of memory accesses.
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
return res;
@@ -1912,7 +1912,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2014,8 +2014,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
- // delay, then try again
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // delay, then try again
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -2032,8 +2032,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2150,7 +2150,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
!old_h->may_skip) { // we used old_h as a terminator
old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
- if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
}
}
@@ -2310,8 +2310,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.store(nv, std::memory_order_release);
break; // out of for(;;)-loop
}
- // aggressive here; no one can proceed till we do
- c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
+ // aggressive here; no one can proceed till we do
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
@@ -2323,8 +2323,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (!cond_waiter) {
// Sample lock contention events only if the (first) waiter was trying to
// acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles =
- base_internal::CycleClock::Now() - enqueue_timestamp;
+ int64_t wait_cycles =
+ base_internal::CycleClock::Now() - enqueue_timestamp;
mutex_tracer("slow release", this, wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(enqueue_timestamp);
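The sampling block above timestamps the first waiter's enqueue with the cycle clock and reports the delta on the slow release. A hedged sketch of the same measurement, substituting std::chrono::steady_clock for base_internal::CycleClock and a hypothetical TraceContention callback for the mutex_tracer hook:

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the mutex_tracer atomic hook.
    void TraceContention(const char* what, const void* obj, int64_t wait_ns) {
      std::fprintf(stderr, "%s %p: waited %lld ns\n", what, obj,
                   static_cast<long long>(wait_ns));
    }

    // Stamp on enqueue, report on release.
    struct ContentionSample {
      std::chrono::steady_clock::time_point enqueue =
          std::chrono::steady_clock::now();
      void ReportRelease(const void* mu) const {
        const auto waited = std::chrono::steady_clock::now() - enqueue;
        TraceContention(
            "slow release", mu,
            std::chrono::duration_cast<std::chrono::nanoseconds>(waited)
                .count());
      }
    };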
@@ -2351,7 +2351,7 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2401,7 +2401,7 @@ void Mutex::Fer(PerThreadSynch *w) {
return;
}
}
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
@@ -2450,7 +2450,7 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2479,8 +2479,8 @@ void CondVar::Remove(PerThreadSynch *s) {
std::memory_order_release);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
}
@@ -2513,7 +2513,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
!cv_word->compare_exchange_weak(v, v | kCvSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
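CondVarEnqueue first wins the kCvSpin bit with the compare_exchange_weak loop above, falling back to MutexDelay between attempts. The bare spin-bit idiom looks like this, with a hypothetical kSpin mask in place of kCvSpin and a plain yield as the backoff:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    constexpr intptr_t kSpin = 0x1;  // stand-in for kCvSpin

    // Loop until we own the spin bit. compare_exchange_weak may fail
    // spuriously; that is harmless since we reload and retry regardless.
    void AcquireSpinBit(std::atomic<intptr_t>* word) {
      intptr_t v = word->load(std::memory_order_relaxed);
      while ((v & kSpin) != 0 ||
             !word->compare_exchange_weak(v, v | kSpin,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
        std::this_thread::yield();  // the real loop uses MutexDelay(c, GENTLE)
        v = word->load(std::memory_order_relaxed);
      }
    }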
@@ -2612,7 +2612,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2645,7 +2645,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2682,8 +2682,8 @@ void CondVar::SignalAll () {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2696,7 +2696,7 @@ void ReleasableMutexLock::Release() {
this->mu_ = nullptr;
}
-#ifdef ABSL_HAVE_THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled