path: root/contrib/restricted/abseil-cpp/absl/synchronization
author     anastasy888 <anastasy888@yandex-team.ru>      2022-02-10 16:45:55 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:55 +0300
commit     3a7a498715ef1b66f5054455421b845e45e3a653 (patch)
tree       1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/restricted/abseil-cpp/absl/synchronization
parent     49f765d71da452ea93138a25559dfa68dd76c7f3 (diff)
download   ydb-3a7a498715ef1b66f5054455421b845e45e3a653.tar.gz
Restoring authorship annotation for <anastasy888@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/synchronization')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc                             100
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/barrier.h                              154
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc                     80
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h                     188
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc     270
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h      116
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc               1384
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h                 274
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h              252
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc             204
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h              216
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h                 182
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc                     816
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h                      290
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make                        58
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc                              5208
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/mutex.h                               1982
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/notification.cc                        152
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/notification.h                         242
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/ya.make                                 92
20 files changed, 6130 insertions, 6130 deletions
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
index fa572f04f8..0dfd795e7b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.cc
@@ -1,52 +1,52 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/barrier.h"
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/barrier.h"
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-// Return whether int *arg is zero.
-static bool IsZero(void *arg) {
- return 0 == *reinterpret_cast<int *>(arg);
-}
-
-bool Barrier::Block() {
- MutexLock l(&this->lock_);
-
- this->num_to_block_--;
- if (this->num_to_block_ < 0) {
- ABSL_RAW_LOG(
- FATAL,
- "Block() called too many times. num_to_block_=%d out of total=%d",
- this->num_to_block_, this->num_to_exit_);
- }
-
- this->lock_.Await(Condition(IsZero, &this->num_to_block_));
-
- // Determine which thread can safely delete this Barrier object
- this->num_to_exit_--;
- ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
-
- // If num_to_exit_ == 0 then all other threads in the barrier have
- // exited the Wait() and have released the Mutex so this thread is
- // free to delete the barrier.
- return this->num_to_exit_ == 0;
-}
-
+
+// Return whether int *arg is zero.
+static bool IsZero(void *arg) {
+ return 0 == *reinterpret_cast<int *>(arg);
+}
+
+bool Barrier::Block() {
+ MutexLock l(&this->lock_);
+
+ this->num_to_block_--;
+ if (this->num_to_block_ < 0) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Block() called too many times. num_to_block_=%d out of total=%d",
+ this->num_to_block_, this->num_to_exit_);
+ }
+
+ this->lock_.Await(Condition(IsZero, &this->num_to_block_));
+
+ // Determine which thread can safely delete this Barrier object
+ this->num_to_exit_--;
+ ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
+
+ // If num_to_exit_ == 0 then all other threads in the barrier have
+ // exited the Wait() and have released the Mutex so this thread is
+ // free to delete the barrier.
+ return this->num_to_exit_ == 0;
+}
+
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
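
Side note (not part of the commit): Barrier::Block() above waits via lock_.Await(Condition(IsZero, &num_to_block_)). A minimal standalone sketch of that Mutex::Await-with-Condition pattern, using only the public absl::Mutex API and a hypothetical counter named pending:

#include "absl/synchronization/mutex.h"

namespace example {  // hypothetical namespace, not part of the diff

absl::Mutex mu;
int pending = 3;  // guarded by mu; hypothetical count of outstanding work

// Predicate in the same shape Barrier uses: a plain function over void*.
bool IsZero(void* arg) { return *static_cast<int*>(arg) == 0; }

void MarkOneDone() {
  absl::MutexLock l(&mu);
  --pending;
}

void WaitUntilAllDone() {
  absl::MutexLock l(&mu);
  // Await releases mu while the condition is false and re-acquires it
  // before returning once the condition holds.
  mu.Await(absl::Condition(IsZero, &pending));
}

}  // namespace example
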
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
index 8878d7fce8..d8e754406f 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/barrier.h
@@ -1,79 +1,79 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// barrier.h
-// -----------------------------------------------------------------------------
-
-#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
-#define ABSL_SYNCHRONIZATION_BARRIER_H_
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// barrier.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
+#define ABSL_SYNCHRONIZATION_BARRIER_H_
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-// Barrier
-//
-// This class creates a barrier which blocks threads until a prespecified
-// threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes
-// the `Barrier` by calling `Block()` on the barrier, which will block that
-// thread; no call to `Block()` will return until `num_threads` threads have
-// called it.
-//
-// Exactly one call to `Block()` will return `true`, which is then responsible
-// for destroying the barrier; because stack allocation will cause the barrier
-// to be deleted when it is out of scope, barriers should not be stack
-// allocated.
-//
-// Example:
-//
-// // Main thread creates a `Barrier`:
-// barrier = new Barrier(num_threads);
-//
-// // Each participating thread could then call:
-// if (barrier->Block()) delete barrier; // Exactly one call to `Block()`
-// // returns `true`; that call
-// // deletes the barrier.
-class Barrier {
- public:
- // `num_threads` is the number of threads that will participate in the barrier
- explicit Barrier(int num_threads)
- : num_to_block_(num_threads), num_to_exit_(num_threads) {}
-
- Barrier(const Barrier&) = delete;
- Barrier& operator=(const Barrier&) = delete;
-
- // Barrier::Block()
- //
- // Blocks the current thread, and returns only when the `num_threads`
- // threshold of threads utilizing this barrier has been reached. `Block()`
- // returns `true` for precisely one caller, which may then destroy the
- // barrier.
- //
- // Memory ordering: For any threads X and Y, any action taken by X
- // before X calls `Block()` will be visible to Y after Y returns from
- // `Block()`.
- bool Block();
-
- private:
- Mutex lock_;
- int num_to_block_ ABSL_GUARDED_BY(lock_);
- int num_to_exit_ ABSL_GUARDED_BY(lock_);
-};
-
+
+// Barrier
+//
+// This class creates a barrier which blocks threads until a prespecified
+// threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes
+// the `Barrier` by calling `Block()` on the barrier, which will block that
+// thread; no call to `Block()` will return until `num_threads` threads have
+// called it.
+//
+// Exactly one call to `Block()` will return `true`, which is then responsible
+// for destroying the barrier; because stack allocation will cause the barrier
+// to be deleted when it is out of scope, barriers should not be stack
+// allocated.
+//
+// Example:
+//
+// // Main thread creates a `Barrier`:
+// barrier = new Barrier(num_threads);
+//
+// // Each participating thread could then call:
+// if (barrier->Block()) delete barrier; // Exactly one call to `Block()`
+// // returns `true`; that call
+// // deletes the barrier.
+class Barrier {
+ public:
+ // `num_threads` is the number of threads that will participate in the barrier
+ explicit Barrier(int num_threads)
+ : num_to_block_(num_threads), num_to_exit_(num_threads) {}
+
+ Barrier(const Barrier&) = delete;
+ Barrier& operator=(const Barrier&) = delete;
+
+ // Barrier::Block()
+ //
+ // Blocks the current thread, and returns only when the `num_threads`
+ // threshold of threads utilizing this barrier has been reached. `Block()`
+ // returns `true` for precisely one caller, which may then destroy the
+ // barrier.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `Block()` will be visible to Y after Y returns from
+ // `Block()`.
+ bool Block();
+
+ private:
+ Mutex lock_;
+ int num_to_block_ ABSL_GUARDED_BY(lock_);
+ int num_to_exit_ ABSL_GUARDED_BY(lock_);
+};
+
ABSL_NAMESPACE_END
-} // namespace absl
-#endif // ABSL_SYNCHRONIZATION_BARRIER_H_
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_BARRIER_H_
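
Side note (not part of the commit): the barrier.h comment above gives a schematic usage example. A compilable sketch of the same idea follows; RunWorkers and the thread loop are illustrative additions, not part of the diff.

#include <thread>
#include <vector>

#include "absl/synchronization/barrier.h"

void RunWorkers(int num_threads) {
  // Heap-allocate, as the header advises against stack allocation.
  absl::Barrier* barrier = new absl::Barrier(num_threads);
  std::vector<std::thread> threads;
  threads.reserve(num_threads);
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back([barrier] {
      // ... per-thread setup work ...
      // Exactly one Block() call returns true; that caller deletes the barrier.
      if (barrier->Block()) delete barrier;
    });
  }
  for (auto& t : threads) t.join();
}
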
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
index 6798a596f1..d2f82da3bb 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.cc
@@ -1,26 +1,26 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/blocking_counter.h"
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/blocking_counter.h"
+
#include <atomic>
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
+
namespace {
// Return whether int *arg is true.
@@ -33,9 +33,9 @@ BlockingCounter::BlockingCounter(int initial_count)
num_waiting_(0),
done_{initial_count == 0 ? true : false} {
ABSL_RAW_CHECK(initial_count >= 0, "BlockingCounter initial_count negative");
-}
-
-bool BlockingCounter::DecrementCount() {
+}
+
+bool BlockingCounter::DecrementCount() {
int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
ABSL_RAW_CHECK(count >= 0,
"BlockingCounter::DecrementCount() called too many times");
@@ -43,25 +43,25 @@ bool BlockingCounter::DecrementCount() {
MutexLock l(&lock_);
done_ = true;
return true;
- }
+ }
return false;
-}
-
-void BlockingCounter::Wait() {
- MutexLock l(&this->lock_);
-
- // only one thread may call Wait(). To support more than one thread,
- // implement a counter num_to_exit, like in the Barrier class.
- ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
- num_waiting_++;
-
+}
+
+void BlockingCounter::Wait() {
+ MutexLock l(&this->lock_);
+
+ // only one thread may call Wait(). To support more than one thread,
+ // implement a counter num_to_exit, like in the Barrier class.
+ ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
+ num_waiting_++;
+
this->lock_.Await(Condition(IsDone, &this->done_));
-
+
// At this point, we know that all threads executing DecrementCount
// will not touch this object again.
- // Therefore, the thread calling this method is free to delete the object
- // after we return from this method.
-}
-
+ // Therefore, the thread calling this method is free to delete the object
+ // after we return from this method.
+}
+
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
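
Side note (not part of the commit): DecrementCount() above pairs fetch_sub(1, std::memory_order_acq_rel) with a zero check so that exactly one decrementer observes the final transition. A standalone sketch of that "last one out" idiom, with a hypothetical free function:

#include <atomic>

// Returns true for exactly one caller: the one whose decrement drives the
// counter to zero. Acquire-release ordering makes the writes performed by
// every earlier decrementer visible to that last caller.
bool DecrementAndCheckLast(std::atomic<int>& count) {
  int remaining = count.fetch_sub(1, std::memory_order_acq_rel) - 1;
  return remaining == 0;
}
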
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
index 79410896e9..1908fdb1d9 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/blocking_counter.h
@@ -1,101 +1,101 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// blocking_counter.h
-// -----------------------------------------------------------------------------
-
-#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
-#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
-
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// blocking_counter.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+
#include <atomic>
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-// BlockingCounter
-//
-// This class allows a thread to block for a pre-specified number of actions.
-// `BlockingCounter` maintains a single non-negative abstract integer "count"
-// with an initial value `initial_count`. A thread can then call `Wait()` on
-// this blocking counter to block until the specified number of events occur;
-// worker threads then call 'DecrementCount()` on the counter upon completion of
-// their work. Once the counter's internal "count" reaches zero, the blocked
-// thread unblocks.
-//
-// A `BlockingCounter` requires the following:
-// - its `initial_count` is non-negative.
-// - the number of calls to `DecrementCount()` on it is at most
-// `initial_count`.
-// - `Wait()` is called at most once on it.
-//
-// Given the above requirements, a `BlockingCounter` provides the following
-// guarantees:
-// - Once its internal "count" reaches zero, no legal action on the object
-// can further change the value of "count".
-// - When `Wait()` returns, it is legal to destroy the `BlockingCounter`.
-// - When `Wait()` returns, the number of calls to `DecrementCount()` on
-// this blocking counter exactly equals `initial_count`.
-//
-// Example:
-// BlockingCounter bcount(N); // there are N items of work
-// ... Allow worker threads to start.
-// ... On completing each work item, workers do:
-// ... bcount.DecrementCount(); // an item of work has been completed
-//
-// bcount.Wait(); // wait for all work to be complete
-//
-class BlockingCounter {
- public:
+
+// BlockingCounter
+//
+// This class allows a thread to block for a pre-specified number of actions.
+// `BlockingCounter` maintains a single non-negative abstract integer "count"
+// with an initial value `initial_count`. A thread can then call `Wait()` on
+// this blocking counter to block until the specified number of events occur;
+// worker threads then call 'DecrementCount()` on the counter upon completion of
+// their work. Once the counter's internal "count" reaches zero, the blocked
+// thread unblocks.
+//
+// A `BlockingCounter` requires the following:
+// - its `initial_count` is non-negative.
+// - the number of calls to `DecrementCount()` on it is at most
+// `initial_count`.
+// - `Wait()` is called at most once on it.
+//
+// Given the above requirements, a `BlockingCounter` provides the following
+// guarantees:
+// - Once its internal "count" reaches zero, no legal action on the object
+// can further change the value of "count".
+// - When `Wait()` returns, it is legal to destroy the `BlockingCounter`.
+// - When `Wait()` returns, the number of calls to `DecrementCount()` on
+// this blocking counter exactly equals `initial_count`.
+//
+// Example:
+// BlockingCounter bcount(N); // there are N items of work
+// ... Allow worker threads to start.
+// ... On completing each work item, workers do:
+// ... bcount.DecrementCount(); // an item of work has been completed
+//
+// bcount.Wait(); // wait for all work to be complete
+//
+class BlockingCounter {
+ public:
explicit BlockingCounter(int initial_count);
-
- BlockingCounter(const BlockingCounter&) = delete;
- BlockingCounter& operator=(const BlockingCounter&) = delete;
-
- // BlockingCounter::DecrementCount()
- //
- // Decrements the counter's "count" by one, and return "count == 0". This
- // function requires that "count != 0" when it is called.
- //
- // Memory ordering: For any threads X and Y, any action taken by X
- // before it calls `DecrementCount()` is visible to thread Y after
- // Y's call to `DecrementCount()`, provided Y's call returns `true`.
- bool DecrementCount();
-
- // BlockingCounter::Wait()
- //
- // Blocks until the counter reaches zero. This function may be called at most
- // once. On return, `DecrementCount()` will have been called "initial_count"
- // times and the blocking counter may be destroyed.
- //
- // Memory ordering: For any threads X and Y, any action taken by X
- // before X calls `DecrementCount()` is visible to Y after Y returns
- // from `Wait()`.
- void Wait();
-
- private:
- Mutex lock_;
+
+ BlockingCounter(const BlockingCounter&) = delete;
+ BlockingCounter& operator=(const BlockingCounter&) = delete;
+
+ // BlockingCounter::DecrementCount()
+ //
+ // Decrements the counter's "count" by one, and return "count == 0". This
+ // function requires that "count != 0" when it is called.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before it calls `DecrementCount()` is visible to thread Y after
+ // Y's call to `DecrementCount()`, provided Y's call returns `true`.
+ bool DecrementCount();
+
+ // BlockingCounter::Wait()
+ //
+ // Blocks until the counter reaches zero. This function may be called at most
+ // once. On return, `DecrementCount()` will have been called "initial_count"
+ // times and the blocking counter may be destroyed.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `DecrementCount()` is visible to Y after Y returns
+ // from `Wait()`.
+ void Wait();
+
+ private:
+ Mutex lock_;
std::atomic<int> count_;
- int num_waiting_ ABSL_GUARDED_BY(lock_);
+ int num_waiting_ ABSL_GUARDED_BY(lock_);
bool done_ ABSL_GUARDED_BY(lock_);
-};
-
+};
+
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
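
Side note (not part of the commit): the blocking_counter.h example above is schematic. A compilable sketch of the documented usage; ProcessItems and the worker loop are illustrative additions, not part of the diff.

#include <thread>
#include <vector>

#include "absl/synchronization/blocking_counter.h"

void ProcessItems(int num_items) {
  absl::BlockingCounter bcount(num_items);  // there are num_items items of work
  std::vector<std::thread> workers;
  workers.reserve(num_items);
  for (int i = 0; i < num_items; ++i) {
    workers.emplace_back([&bcount] {
      // ... complete one item of work ...
      bcount.DecrementCount();  // an item of work has been completed
    });
  }
  bcount.Wait();  // returns once DecrementCount() has run num_items times
  for (auto& t : workers) t.join();
}
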
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
index 2d4250f8a8..53a71b342b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
@@ -1,140 +1,140 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <new>
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include <string.h>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+#include <new>
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include <string.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// ThreadIdentity storage is persistent, we maintain a free-list of previously
-// released ThreadIdentity objects.
+namespace synchronization_internal {
+
+// ThreadIdentity storage is persistent, we maintain a free-list of previously
+// released ThreadIdentity objects.
ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// Since we must preserve their storage we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
- base_internal::ThreadIdentity* identity =
- static_cast<base_internal::ThreadIdentity*>(v);
-
- // all_locks might have been allocated by the Mutex implementation.
- // We free it here when we are notified that our thread is dying.
- if (identity->per_thread_synch.all_locks != nullptr) {
- base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
- }
-
- PerThreadSem::Destroy(identity);
-
- // We must explicitly clear the current thread's identity:
- // (a) Subsequent (unrelated) per-thread destructors may require an identity.
- // We must guarantee a new identity is used in this case (this instructor
- // will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
- // (b) ThreadIdentity implementations may depend on memory that is not
- // reinitialized before reuse. We must allow explicit clearing of the
- // association state in this case.
- base_internal::ClearCurrentThreadIdentity();
- {
- base_internal::SpinLockHolder l(&freelist_lock);
- identity->next = thread_identity_freelist;
- thread_identity_freelist = identity;
- }
-}
-
-// Return value rounded up to next multiple of align.
-// Align must be a power of two.
-static intptr_t RoundUp(intptr_t addr, intptr_t align) {
- return (addr + align - 1) & ~(align - 1);
-}
-
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
- base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
- pts->next = nullptr;
- pts->skip = nullptr;
- pts->may_skip = false;
- pts->waitp = nullptr;
- pts->suppress_fatal_errors = false;
- pts->readers = 0;
- pts->priority = 0;
- pts->next_priority_read_cycles = 0;
- pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
- std::memory_order_relaxed);
- pts->maybe_unlocking = false;
- pts->wake = false;
- pts->cond_waiter = false;
- pts->all_locks = nullptr;
- identity->blocked_count_ptr = nullptr;
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
- identity->next = nullptr;
-}
-
-static base_internal::ThreadIdentity* NewThreadIdentity() {
- base_internal::ThreadIdentity* identity = nullptr;
-
- {
- // Re-use a previously released object if possible.
- base_internal::SpinLockHolder l(&freelist_lock);
- if (thread_identity_freelist) {
- identity = thread_identity_freelist; // Take list-head.
- thread_identity_freelist = thread_identity_freelist->next;
- }
- }
-
- if (identity == nullptr) {
- // Allocate enough space to align ThreadIdentity to a multiple of
- // PerThreadSynch::kAlignment. This space is never released (it is
- // added to a freelist by ReclaimThreadIdentity instead).
- void* allocation = base_internal::LowLevelAlloc::Alloc(
- sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
- // Round up the address to the required alignment.
- identity = reinterpret_cast<base_internal::ThreadIdentity*>(
- RoundUp(reinterpret_cast<intptr_t>(allocation),
- base_internal::PerThreadSynch::kAlignment));
- }
- ResetThreadIdentity(identity);
-
- return identity;
-}
-
-// Allocates and attaches ThreadIdentity object for the calling thread. Returns
-// the new identity.
-// REQUIRES: CurrentThreadIdentity(false) == nullptr
-base_internal::ThreadIdentity* CreateThreadIdentity() {
- base_internal::ThreadIdentity* identity = NewThreadIdentity();
- PerThreadSem::Init(identity);
- // Associate the value with the current thread, and attach our destructor.
- base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
- return identity;
-}
-
-} // namespace synchronization_internal
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// Since we must preserve their storage we cache them for re-use.
+void ReclaimThreadIdentity(void* v) {
+ base_internal::ThreadIdentity* identity =
+ static_cast<base_internal::ThreadIdentity*>(v);
+
+ // all_locks might have been allocated by the Mutex implementation.
+ // We free it here when we are notified that our thread is dying.
+ if (identity->per_thread_synch.all_locks != nullptr) {
+ base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
+ }
+
+ PerThreadSem::Destroy(identity);
+
+ // We must explicitly clear the current thread's identity:
+ // (a) Subsequent (unrelated) per-thread destructors may require an identity.
+ // We must guarantee a new identity is used in this case (this instructor
+ // will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
+ // (b) ThreadIdentity implementations may depend on memory that is not
+ // reinitialized before reuse. We must allow explicit clearing of the
+ // association state in this case.
+ base_internal::ClearCurrentThreadIdentity();
+ {
+ base_internal::SpinLockHolder l(&freelist_lock);
+ identity->next = thread_identity_freelist;
+ thread_identity_freelist = identity;
+ }
+}
+
+// Return value rounded up to next multiple of align.
+// Align must be a power of two.
+static intptr_t RoundUp(intptr_t addr, intptr_t align) {
+ return (addr + align - 1) & ~(align - 1);
+}
+
+static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+ base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
+ pts->next = nullptr;
+ pts->skip = nullptr;
+ pts->may_skip = false;
+ pts->waitp = nullptr;
+ pts->suppress_fatal_errors = false;
+ pts->readers = 0;
+ pts->priority = 0;
+ pts->next_priority_read_cycles = 0;
+ pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
+ std::memory_order_relaxed);
+ pts->maybe_unlocking = false;
+ pts->wake = false;
+ pts->cond_waiter = false;
+ pts->all_locks = nullptr;
+ identity->blocked_count_ptr = nullptr;
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->next = nullptr;
+}
+
+static base_internal::ThreadIdentity* NewThreadIdentity() {
+ base_internal::ThreadIdentity* identity = nullptr;
+
+ {
+ // Re-use a previously released object if possible.
+ base_internal::SpinLockHolder l(&freelist_lock);
+ if (thread_identity_freelist) {
+ identity = thread_identity_freelist; // Take list-head.
+ thread_identity_freelist = thread_identity_freelist->next;
+ }
+ }
+
+ if (identity == nullptr) {
+ // Allocate enough space to align ThreadIdentity to a multiple of
+ // PerThreadSynch::kAlignment. This space is never released (it is
+ // added to a freelist by ReclaimThreadIdentity instead).
+ void* allocation = base_internal::LowLevelAlloc::Alloc(
+ sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
+ // Round up the address to the required alignment.
+ identity = reinterpret_cast<base_internal::ThreadIdentity*>(
+ RoundUp(reinterpret_cast<intptr_t>(allocation),
+ base_internal::PerThreadSynch::kAlignment));
+ }
+ ResetThreadIdentity(identity);
+
+ return identity;
+}
+
+// Allocates and attaches ThreadIdentity object for the calling thread. Returns
+// the new identity.
+// REQUIRES: CurrentThreadIdentity(false) == nullptr
+base_internal::ThreadIdentity* CreateThreadIdentity() {
+ base_internal::ThreadIdentity* identity = NewThreadIdentity();
+ PerThreadSem::Init(identity);
+ // Associate the value with the current thread, and attach our destructor.
+ base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
+ return identity;
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
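
Side note (not part of the commit): NewThreadIdentity() above over-allocates by kAlignment - 1 bytes and rounds the address up with RoundUp(). A standalone sketch of that power-of-two round-up idiom; AlignWithin is a hypothetical helper, not in the diff.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Round addr up to the next multiple of align; align must be a power of two.
static std::intptr_t RoundUp(std::intptr_t addr, std::intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

// Given a raw allocation of at least size + alignment - 1 bytes, return the
// first suitably aligned address inside it, mirroring NewThreadIdentity().
void* AlignWithin(void* allocation, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two
  return reinterpret_cast<void*>(
      RoundUp(reinterpret_cast<std::intptr_t>(allocation),
              static_cast<std::intptr_t>(alignment)));
}
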
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
index 517c8e49d7..e121f68377 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
@@ -1,60 +1,60 @@
-/*
- * Copyright 2017 The Abseil Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Interface for getting the current ThreadIdentity, creating one if necessary.
-// See thread_identity.h.
-//
-// This file is separate from thread_identity.h because creating a new
-// ThreadIdentity requires slightly higher level libraries (per_thread_sem
-// and low_level_alloc) than accessing an existing one. This separation allows
-// us to have a smaller //absl/base:base.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/port.h"
-
-namespace absl {
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Interface for getting the current ThreadIdentity, creating one if necessary.
+// See thread_identity.h.
+//
+// This file is separate from thread_identity.h because creating a new
+// ThreadIdentity requires slightly higher level libraries (per_thread_sem
+// and low_level_alloc) than accessing an existing one. This separation allows
+// us to have a smaller //absl/base:base.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/port.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Allocates and attaches a ThreadIdentity object for the calling thread.
-// For private use only.
-base_internal::ThreadIdentity* CreateThreadIdentity();
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
-// Returns the ThreadIdentity object representing the calling thread; guaranteed
-// to be unique for its lifetime. The returned object will remain valid for the
-// program's lifetime; although it may be re-assigned to a subsequent thread.
-// If one does not exist for the calling thread, allocate it now.
-inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
- base_internal::ThreadIdentity* identity =
- base_internal::CurrentThreadIdentityIfPresent();
- if (ABSL_PREDICT_FALSE(identity == nullptr)) {
- return CreateThreadIdentity();
- }
- return identity;
-}
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// Allocates and attaches a ThreadIdentity object for the calling thread.
+// For private use only.
+base_internal::ThreadIdentity* CreateThreadIdentity();
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// For private use only.
+void ReclaimThreadIdentity(void* v);
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime. The returned object will remain valid for the
+// program's lifetime; although it may be re-assigned to a subsequent thread.
+// If one does not exist for the calling thread, allocate it now.
+inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
+ base_internal::ThreadIdentity* identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ if (ABSL_PREDICT_FALSE(identity == nullptr)) {
+ return CreateThreadIdentity();
+ }
+ return identity;
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
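
Side note (not part of the commit): GetOrCreateCurrentThreadIdentity() above is a lazy get-or-create over a per-thread slot. A simplified sketch of the same shape using plain thread_local storage; Identity and CreateIdentity are hypothetical stand-ins, whereas the real code recycles storage through a freelist and registers a destructor via SetCurrentThreadIdentity().

#include <cstdint>

struct Identity {  // hypothetical stand-in for base_internal::ThreadIdentity
  std::uint64_t ticker = 0;
};

// Slow path: allocate a fresh per-thread object. Deliberately never freed
// here, echoing the real code's policy of keeping identity storage alive.
static Identity* CreateIdentity() { return new Identity(); }

inline Identity* GetOrCreateIdentity() {
  static thread_local Identity* identity = nullptr;
  if (identity == nullptr) {  // cold path, taken at most once per thread
    identity = CreateIdentity();
  }
  return identity;
}
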
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index e0b4f0454d..27fec21681 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -1,698 +1,698 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// GraphCycles provides incremental cycle detection on a dynamic
-// graph using the following algorithm:
-//
-// A dynamic topological sort algorithm for directed acyclic graphs
-// David J. Pearce, Paul H. J. Kelly
-// Journal of Experimental Algorithmics (JEA) JEA Homepage archive
-// Volume 11, 2006, Article No. 1.7
-//
-// Brief summary of the algorithm:
-//
-// (1) Maintain a rank for each node that is consistent
-// with the topological sort of the graph. I.e., path from x to y
-// implies rank[x] < rank[y].
-// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
-// (3) Otherwise: adjust ranks in the neighborhood of x and y.
-
-#include "absl/base/attributes.h"
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/graphcycles.h"
-
-#include <algorithm>
-#include <array>
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// GraphCycles provides incremental cycle detection on a dynamic
+// graph using the following algorithm:
+//
+// A dynamic topological sort algorithm for directed acyclic graphs
+// David J. Pearce, Paul H. J. Kelly
+// Journal of Experimental Algorithmics (JEA) JEA Homepage archive
+// Volume 11, 2006, Article No. 1.7
+//
+// Brief summary of the algorithm:
+//
+// (1) Maintain a rank for each node that is consistent
+// with the topological sort of the graph. I.e., path from x to y
+// implies rank[x] < rank[y].
+// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
+// (3) Otherwise: adjust ranks in the neighborhood of x and y.
+
+#include "absl/base/attributes.h"
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/graphcycles.h"
+
+#include <algorithm>
+#include <array>
#include <limits>
-#include "absl/base/internal/hide_ptr.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-
-// Do not use STL. This module does not use standard memory allocation.
-
-namespace absl {
+#include "absl/base/internal/hide_ptr.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// Do not use STL. This module does not use standard memory allocation.
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-namespace {
-
-// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
-// which people are doing things like acquiring Mutexes.
+namespace synchronization_internal {
+
+namespace {
+
+// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
+// which people are doing things like acquiring Mutexes.
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
-
-static void InitArenaIfNecessary() {
- arena_mu.Lock();
- if (arena == nullptr) {
- arena = base_internal::LowLevelAlloc::NewArena(0);
- }
- arena_mu.Unlock();
-}
-
-// Number of inlined elements in Vec. Hash table implementation
-// relies on this being a power of two.
-static const uint32_t kInline = 8;
-
-// A simple LowLevelAlloc based resizable vector with inlined storage
-// for a few elements. T must be a plain type since constructor
-// and destructor are not run on elements of type T managed by Vec.
-template <typename T>
-class Vec {
- public:
- Vec() { Init(); }
- ~Vec() { Discard(); }
-
- void clear() {
- Discard();
- Init();
- }
-
- bool empty() const { return size_ == 0; }
- uint32_t size() const { return size_; }
- T* begin() { return ptr_; }
- T* end() { return ptr_ + size_; }
- const T& operator[](uint32_t i) const { return ptr_[i]; }
- T& operator[](uint32_t i) { return ptr_[i]; }
- const T& back() const { return ptr_[size_-1]; }
- void pop_back() { size_--; }
-
- void push_back(const T& v) {
- if (size_ == capacity_) Grow(size_ + 1);
- ptr_[size_] = v;
- size_++;
- }
-
- void resize(uint32_t n) {
- if (n > capacity_) Grow(n);
- size_ = n;
- }
-
- void fill(const T& val) {
- for (uint32_t i = 0; i < size(); i++) {
- ptr_[i] = val;
- }
- }
-
- // Guarantees src is empty at end.
- // Provided for the hash table resizing code below.
- void MoveFrom(Vec<T>* src) {
- if (src->ptr_ == src->space_) {
- // Need to actually copy
- resize(src->size_);
- std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
- src->size_ = 0;
- } else {
- Discard();
- ptr_ = src->ptr_;
- size_ = src->size_;
- capacity_ = src->capacity_;
- src->Init();
- }
- }
-
- private:
- T* ptr_;
- T space_[kInline];
- uint32_t size_;
- uint32_t capacity_;
-
- void Init() {
- ptr_ = space_;
- size_ = 0;
- capacity_ = kInline;
- }
-
- void Discard() {
- if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
- }
-
- void Grow(uint32_t n) {
- while (capacity_ < n) {
- capacity_ *= 2;
- }
- size_t request = static_cast<size_t>(capacity_) * sizeof(T);
- T* copy = static_cast<T*>(
- base_internal::LowLevelAlloc::AllocWithArena(request, arena));
- std::copy(ptr_, ptr_ + size_, copy);
- Discard();
- ptr_ = copy;
- }
-
- Vec(const Vec&) = delete;
- Vec& operator=(const Vec&) = delete;
-};
-
-// A hash set of non-negative int32_t that uses Vec for its underlying storage.
-class NodeSet {
- public:
- NodeSet() { Init(); }
-
- void clear() { Init(); }
- bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
-
- bool insert(int32_t v) {
- uint32_t i = FindIndex(v);
- if (table_[i] == v) {
- return false;
- }
- if (table_[i] == kEmpty) {
- // Only inserting over an empty cell increases the number of occupied
- // slots.
- occupied_++;
- }
- table_[i] = v;
- // Double when 75% full.
- if (occupied_ >= table_.size() - table_.size()/4) Grow();
- return true;
- }
-
- void erase(uint32_t v) {
- uint32_t i = FindIndex(v);
- if (static_cast<uint32_t>(table_[i]) == v) {
- table_[i] = kDel;
- }
- }
-
- // Iteration: is done via HASH_FOR_EACH
- // Example:
- // HASH_FOR_EACH(elem, node->out) { ... }
-#define HASH_FOR_EACH(elem, eset) \
- for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
- bool Next(int32_t* cursor, int32_t* elem) {
- while (static_cast<uint32_t>(*cursor) < table_.size()) {
- int32_t v = table_[*cursor];
- (*cursor)++;
- if (v >= 0) {
- *elem = v;
- return true;
- }
- }
- return false;
- }
-
- private:
- enum : int32_t { kEmpty = -1, kDel = -2 };
- Vec<int32_t> table_;
- uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
-
- static uint32_t Hash(uint32_t a) { return a * 41; }
-
- // Return index for storing v. May return an empty index or deleted index
- int FindIndex(int32_t v) const {
- // Search starting at hash index.
- const uint32_t mask = table_.size() - 1;
- uint32_t i = Hash(v) & mask;
- int deleted_index = -1; // If >= 0, index of first deleted element we see
- while (true) {
- int32_t e = table_[i];
- if (v == e) {
- return i;
- } else if (e == kEmpty) {
- // Return any previously encountered deleted slot.
- return (deleted_index >= 0) ? deleted_index : i;
- } else if (e == kDel && deleted_index < 0) {
- // Keep searching since v might be present later.
- deleted_index = i;
- }
- i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
- }
- }
-
- void Init() {
- table_.clear();
- table_.resize(kInline);
- table_.fill(kEmpty);
- occupied_ = 0;
- }
-
- void Grow() {
- Vec<int32_t> copy;
- copy.MoveFrom(&table_);
- occupied_ = 0;
- table_.resize(copy.size() * 2);
- table_.fill(kEmpty);
-
- for (const auto& e : copy) {
- if (e >= 0) insert(e);
- }
- }
-
- NodeSet(const NodeSet&) = delete;
- NodeSet& operator=(const NodeSet&) = delete;
-};
-
-// We encode a node index and a node version in GraphId. The version
-// number is incremented when the GraphId is freed which automatically
-// invalidates all copies of the GraphId.
-
-inline GraphId MakeId(int32_t index, uint32_t version) {
- GraphId g;
- g.handle =
- (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
- return g;
-}
-
-inline int32_t NodeIndex(GraphId id) {
- return static_cast<uint32_t>(id.handle & 0xfffffffful);
-}
-
-inline uint32_t NodeVersion(GraphId id) {
- return static_cast<uint32_t>(id.handle >> 32);
-}
-
-struct Node {
- int32_t rank; // rank number assigned by Pearce-Kelly algorithm
- uint32_t version; // Current version number
- int32_t next_hash; // Next entry in hash table
- bool visited; // Temporary marker used by depth-first-search
- uintptr_t masked_ptr; // User-supplied pointer
- NodeSet in; // List of immediate predecessor nodes in graph
- NodeSet out; // List of immediate successor nodes in graph
- int priority; // Priority of recorded stack trace.
- int nstack; // Depth of recorded stack trace.
- void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
-};
-
-// Hash table for pointer to node index lookups.
-class PointerMap {
- public:
- explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
- table_.fill(-1);
- }
-
- int32_t Find(void* ptr) {
- auto masked = base_internal::HidePtr(ptr);
- for (int32_t i = table_[Hash(ptr)]; i != -1;) {
- Node* n = (*nodes_)[i];
- if (n->masked_ptr == masked) return i;
- i = n->next_hash;
- }
- return -1;
- }
-
- void Add(void* ptr, int32_t i) {
- int32_t* head = &table_[Hash(ptr)];
- (*nodes_)[i]->next_hash = *head;
- *head = i;
- }
-
- int32_t Remove(void* ptr) {
- // Advance through linked list while keeping track of the
- // predecessor slot that points to the current entry.
- auto masked = base_internal::HidePtr(ptr);
- for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
- int32_t index = *slot;
- Node* n = (*nodes_)[index];
- if (n->masked_ptr == masked) {
- *slot = n->next_hash; // Remove n from linked list
- n->next_hash = -1;
- return index;
- }
- slot = &n->next_hash;
- }
- return -1;
- }
-
- private:
- // Number of buckets in hash table for pointer lookups.
- static constexpr uint32_t kHashTableSize = 8171; // should be prime
-
- const Vec<Node*>* nodes_;
- std::array<int32_t, kHashTableSize> table_;
-
- static uint32_t Hash(void* ptr) {
- return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
- }
-};
-
-} // namespace
-
-struct GraphCycles::Rep {
- Vec<Node*> nodes_;
- Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
- PointerMap ptrmap_;
-
- // Temporary state.
- Vec<int32_t> deltaf_; // Results of forward DFS
- Vec<int32_t> deltab_; // Results of backward DFS
- Vec<int32_t> list_; // All nodes to reprocess
- Vec<int32_t> merged_; // Rank values to assign to list_ entries
- Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
-
- Rep() : ptrmap_(&nodes_) {}
-};
-
-static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
- Node* n = rep->nodes_[NodeIndex(id)];
- return (n->version == NodeVersion(id)) ? n : nullptr;
-}
-
-GraphCycles::GraphCycles() {
- InitArenaIfNecessary();
- rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
- Rep;
-}
-
-GraphCycles::~GraphCycles() {
- for (auto* node : rep_->nodes_) {
- node->Node::~Node();
- base_internal::LowLevelAlloc::Free(node);
- }
- rep_->Rep::~Rep();
- base_internal::LowLevelAlloc::Free(rep_);
-}
-
-bool GraphCycles::CheckInvariants() const {
- Rep* r = rep_;
- NodeSet ranks; // Set of ranks seen so far.
- for (uint32_t x = 0; x < r->nodes_.size(); x++) {
- Node* nx = r->nodes_[x];
- void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
- if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
- ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
- }
- if (nx->visited) {
- ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
- }
- if (!ranks.insert(nx->rank)) {
- ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
- }
- HASH_FOR_EACH(y, nx->out) {
- Node* ny = r->nodes_[y];
- if (nx->rank >= ny->rank) {
- ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
- nx->rank, ny->rank);
- }
- }
- }
- return true;
-}
-
-GraphId GraphCycles::GetId(void* ptr) {
- int32_t i = rep_->ptrmap_.Find(ptr);
- if (i != -1) {
- return MakeId(i, rep_->nodes_[i]->version);
- } else if (rep_->free_nodes_.empty()) {
- Node* n =
- new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
- Node;
- n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
- n->visited = false;
- n->rank = rep_->nodes_.size();
- n->masked_ptr = base_internal::HidePtr(ptr);
- n->nstack = 0;
- n->priority = 0;
- rep_->nodes_.push_back(n);
- rep_->ptrmap_.Add(ptr, n->rank);
- return MakeId(n->rank, n->version);
- } else {
- // Preserve preceding rank since the set of ranks in use must be
- // a permutation of [0,rep_->nodes_.size()-1].
- int32_t r = rep_->free_nodes_.back();
- rep_->free_nodes_.pop_back();
- Node* n = rep_->nodes_[r];
- n->masked_ptr = base_internal::HidePtr(ptr);
- n->nstack = 0;
- n->priority = 0;
- rep_->ptrmap_.Add(ptr, r);
- return MakeId(r, n->version);
- }
-}
-
-void GraphCycles::RemoveNode(void* ptr) {
- int32_t i = rep_->ptrmap_.Remove(ptr);
- if (i == -1) {
- return;
- }
- Node* x = rep_->nodes_[i];
- HASH_FOR_EACH(y, x->out) {
- rep_->nodes_[y]->in.erase(i);
- }
- HASH_FOR_EACH(y, x->in) {
- rep_->nodes_[y]->out.erase(i);
- }
- x->in.clear();
- x->out.clear();
- x->masked_ptr = base_internal::HidePtr<void>(nullptr);
- if (x->version == std::numeric_limits<uint32_t>::max()) {
- // Cannot use x any more
- } else {
- x->version++; // Invalidates all copies of node.
- rep_->free_nodes_.push_back(i);
- }
-}
-
-void* GraphCycles::Ptr(GraphId id) {
- Node* n = FindNode(rep_, id);
- return n == nullptr ? nullptr
- : base_internal::UnhidePtr<void>(n->masked_ptr);
-}
-
-bool GraphCycles::HasNode(GraphId node) {
- return FindNode(rep_, node) != nullptr;
-}
-
-bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
- Node* xn = FindNode(rep_, x);
- return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
-}
-
-void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
- Node* xn = FindNode(rep_, x);
- Node* yn = FindNode(rep_, y);
- if (xn && yn) {
- xn->out.erase(NodeIndex(y));
- yn->in.erase(NodeIndex(x));
- // No need to update the rank assignment since a previous valid
- // rank assignment remains valid after an edge deletion.
- }
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
-static void Reorder(GraphCycles::Rep* r);
-static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
-static void MoveToList(
- GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
-
-bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
- Rep* r = rep_;
- const int32_t x = NodeIndex(idx);
- const int32_t y = NodeIndex(idy);
- Node* nx = FindNode(r, idx);
- Node* ny = FindNode(r, idy);
- if (nx == nullptr || ny == nullptr) return true; // Expired ids
-
- if (nx == ny) return false; // Self edge
- if (!nx->out.insert(y)) {
- // Edge already exists.
- return true;
- }
-
- ny->in.insert(x);
-
- if (nx->rank <= ny->rank) {
- // New edge is consistent with existing rank assignment.
- return true;
- }
-
- // Current rank assignments are incompatible with the new edge. Recompute.
- // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
- if (!ForwardDFS(r, y, nx->rank)) {
- // Found a cycle. Undo the insertion and tell caller.
- nx->out.erase(y);
- ny->in.erase(x);
- // Since we do not call Reorder() on this path, clear any visited
- // markers left by ForwardDFS.
- for (const auto& d : r->deltaf_) {
- r->nodes_[d]->visited = false;
- }
- return false;
- }
- BackwardDFS(r, x, ny->rank);
- Reorder(r);
- return true;
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
- // Avoid recursion since stack space might be limited.
- // We instead keep a stack of nodes to visit.
- r->deltaf_.clear();
- r->stack_.clear();
- r->stack_.push_back(n);
- while (!r->stack_.empty()) {
- n = r->stack_.back();
- r->stack_.pop_back();
- Node* nn = r->nodes_[n];
- if (nn->visited) continue;
-
- nn->visited = true;
- r->deltaf_.push_back(n);
-
- HASH_FOR_EACH(w, nn->out) {
- Node* nw = r->nodes_[w];
- if (nw->rank == upper_bound) {
- return false; // Cycle
- }
- if (!nw->visited && nw->rank < upper_bound) {
- r->stack_.push_back(w);
- }
- }
- }
- return true;
-}
-
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
- r->deltab_.clear();
- r->stack_.clear();
- r->stack_.push_back(n);
- while (!r->stack_.empty()) {
- n = r->stack_.back();
- r->stack_.pop_back();
- Node* nn = r->nodes_[n];
- if (nn->visited) continue;
-
- nn->visited = true;
- r->deltab_.push_back(n);
-
- HASH_FOR_EACH(w, nn->in) {
- Node* nw = r->nodes_[w];
- if (!nw->visited && lower_bound < nw->rank) {
- r->stack_.push_back(w);
- }
- }
- }
-}
-
-static void Reorder(GraphCycles::Rep* r) {
- Sort(r->nodes_, &r->deltab_);
- Sort(r->nodes_, &r->deltaf_);
-
- // Adds contents of delta lists to list_ (backwards deltas first).
- r->list_.clear();
- MoveToList(r, &r->deltab_, &r->list_);
- MoveToList(r, &r->deltaf_, &r->list_);
-
- // Produce sorted list of all ranks that will be reassigned.
- r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
- std::merge(r->deltab_.begin(), r->deltab_.end(),
- r->deltaf_.begin(), r->deltaf_.end(),
- r->merged_.begin());
-
- // Assign the ranks in order to the collected list.
- for (uint32_t i = 0; i < r->list_.size(); i++) {
- r->nodes_[r->list_[i]]->rank = r->merged_[i];
- }
-}
-
-static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
- struct ByRank {
- const Vec<Node*>* nodes;
- bool operator()(int32_t a, int32_t b) const {
- return (*nodes)[a]->rank < (*nodes)[b]->rank;
- }
- };
- ByRank cmp;
- cmp.nodes = &nodes;
- std::sort(delta->begin(), delta->end(), cmp);
-}
-
-static void MoveToList(
- GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
- for (auto& v : *src) {
- int32_t w = v;
- v = r->nodes_[w]->rank; // Replace v entry with its rank
- r->nodes_[w]->visited = false; // Prepare for future DFS calls
- dst->push_back(w);
- }
-}
-
-int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
- GraphId path[]) const {
- Rep* r = rep_;
- if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
- const int32_t x = NodeIndex(idx);
- const int32_t y = NodeIndex(idy);
-
- // Forward depth first search starting at x until we hit y.
- // As we descend into a node, we push it onto the path.
- // As we leave a node, we remove it from the path.
- int path_len = 0;
-
- NodeSet seen;
- r->stack_.clear();
- r->stack_.push_back(x);
- while (!r->stack_.empty()) {
- int32_t n = r->stack_.back();
- r->stack_.pop_back();
- if (n < 0) {
- // Marker to indicate that we are leaving a node
- path_len--;
- continue;
- }
-
- if (path_len < max_path_len) {
- path[path_len] = MakeId(n, rep_->nodes_[n]->version);
- }
- path_len++;
- r->stack_.push_back(-1); // Will remove tentative path entry
-
- if (n == y) {
- return path_len;
- }
-
- HASH_FOR_EACH(w, r->nodes_[n]->out) {
- if (seen.insert(w)) {
- r->stack_.push_back(w);
- }
- }
- }
-
- return 0;
-}
-
-bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
- return FindPath(x, y, 0, nullptr) > 0;
-}
-
-void GraphCycles::UpdateStackTrace(GraphId id, int priority,
- int (*get_stack_trace)(void** stack, int)) {
- Node* n = FindNode(rep_, id);
- if (n == nullptr || n->priority >= priority) {
- return;
- }
- n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
- n->priority = priority;
-}
-
-int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
- Node* n = FindNode(rep_, id);
- if (n == nullptr) {
- *ptr = nullptr;
- return 0;
- } else {
- *ptr = n->stack;
- return n->nstack;
- }
-}
-
-} // namespace synchronization_internal
+
+static void InitArenaIfNecessary() {
+ arena_mu.Lock();
+ if (arena == nullptr) {
+ arena = base_internal::LowLevelAlloc::NewArena(0);
+ }
+ arena_mu.Unlock();
+}
+
+// Number of inlined elements in Vec. Hash table implementation
+// relies on this being a power of two.
+static const uint32_t kInline = 8;
+
+// A simple LowLevelAlloc based resizable vector with inlined storage
+// for a few elements. T must be a plain type since constructor
+// and destructor are not run on elements of type T managed by Vec.
+template <typename T>
+class Vec {
+ public:
+ Vec() { Init(); }
+ ~Vec() { Discard(); }
+
+ void clear() {
+ Discard();
+ Init();
+ }
+
+ bool empty() const { return size_ == 0; }
+ uint32_t size() const { return size_; }
+ T* begin() { return ptr_; }
+ T* end() { return ptr_ + size_; }
+ const T& operator[](uint32_t i) const { return ptr_[i]; }
+ T& operator[](uint32_t i) { return ptr_[i]; }
+ const T& back() const { return ptr_[size_-1]; }
+ void pop_back() { size_--; }
+
+ void push_back(const T& v) {
+ if (size_ == capacity_) Grow(size_ + 1);
+ ptr_[size_] = v;
+ size_++;
+ }
+
+ void resize(uint32_t n) {
+ if (n > capacity_) Grow(n);
+ size_ = n;
+ }
+
+ void fill(const T& val) {
+ for (uint32_t i = 0; i < size(); i++) {
+ ptr_[i] = val;
+ }
+ }
+
+ // Guarantees src is empty at end.
+ // Provided for the hash table resizing code below.
+ void MoveFrom(Vec<T>* src) {
+ if (src->ptr_ == src->space_) {
+ // Need to actually copy
+ resize(src->size_);
+ std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ src->size_ = 0;
+ } else {
+ Discard();
+ ptr_ = src->ptr_;
+ size_ = src->size_;
+ capacity_ = src->capacity_;
+ src->Init();
+ }
+ }
+
+ private:
+ T* ptr_;
+ T space_[kInline];
+ uint32_t size_;
+ uint32_t capacity_;
+
+ void Init() {
+ ptr_ = space_;
+ size_ = 0;
+ capacity_ = kInline;
+ }
+
+ void Discard() {
+ if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
+ }
+
+ void Grow(uint32_t n) {
+ while (capacity_ < n) {
+ capacity_ *= 2;
+ }
+ size_t request = static_cast<size_t>(capacity_) * sizeof(T);
+ T* copy = static_cast<T*>(
+ base_internal::LowLevelAlloc::AllocWithArena(request, arena));
+ std::copy(ptr_, ptr_ + size_, copy);
+ Discard();
+ ptr_ = copy;
+ }
+
+ Vec(const Vec&) = delete;
+ Vec& operator=(const Vec&) = delete;
+};
+
+// A hash set of non-negative int32_t that uses Vec for its underlying storage.
+class NodeSet {
+ public:
+ NodeSet() { Init(); }
+
+ void clear() { Init(); }
+ bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
+
+ bool insert(int32_t v) {
+ uint32_t i = FindIndex(v);
+ if (table_[i] == v) {
+ return false;
+ }
+ if (table_[i] == kEmpty) {
+ // Only inserting over an empty cell increases the number of occupied
+ // slots.
+ occupied_++;
+ }
+ table_[i] = v;
+ // Double when 75% full.
+ if (occupied_ >= table_.size() - table_.size()/4) Grow();
+ return true;
+ }
+
+ void erase(uint32_t v) {
+ uint32_t i = FindIndex(v);
+ if (static_cast<uint32_t>(table_[i]) == v) {
+ table_[i] = kDel;
+ }
+ }
+
+  // Iteration is done via HASH_FOR_EACH.
+ // Example:
+ // HASH_FOR_EACH(elem, node->out) { ... }
+#define HASH_FOR_EACH(elem, eset) \
+ for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
+ bool Next(int32_t* cursor, int32_t* elem) {
+ while (static_cast<uint32_t>(*cursor) < table_.size()) {
+ int32_t v = table_[*cursor];
+ (*cursor)++;
+ if (v >= 0) {
+ *elem = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private:
+ enum : int32_t { kEmpty = -1, kDel = -2 };
+ Vec<int32_t> table_;
+ uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
+
+ static uint32_t Hash(uint32_t a) { return a * 41; }
+
+ // Return index for storing v. May return an empty index or deleted index
+ int FindIndex(int32_t v) const {
+ // Search starting at hash index.
+ const uint32_t mask = table_.size() - 1;
+ uint32_t i = Hash(v) & mask;
+ int deleted_index = -1; // If >= 0, index of first deleted element we see
+ while (true) {
+ int32_t e = table_[i];
+ if (v == e) {
+ return i;
+ } else if (e == kEmpty) {
+ // Return any previously encountered deleted slot.
+ return (deleted_index >= 0) ? deleted_index : i;
+ } else if (e == kDel && deleted_index < 0) {
+ // Keep searching since v might be present later.
+ deleted_index = i;
+ }
+ i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
+ }
+ }
+
+ void Init() {
+ table_.clear();
+ table_.resize(kInline);
+ table_.fill(kEmpty);
+ occupied_ = 0;
+ }
+
+ void Grow() {
+ Vec<int32_t> copy;
+ copy.MoveFrom(&table_);
+ occupied_ = 0;
+ table_.resize(copy.size() * 2);
+ table_.fill(kEmpty);
+
+ for (const auto& e : copy) {
+ if (e >= 0) insert(e);
+ }
+ }
+
+ NodeSet(const NodeSet&) = delete;
+ NodeSet& operator=(const NodeSet&) = delete;
+};
+
+// We encode a node index and a node version in GraphId. The version
+// number is incremented when the GraphId is freed which automatically
+// invalidates all copies of the GraphId.
+
+inline GraphId MakeId(int32_t index, uint32_t version) {
+ GraphId g;
+ g.handle =
+ (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
+ return g;
+}
+
+inline int32_t NodeIndex(GraphId id) {
+ return static_cast<uint32_t>(id.handle & 0xfffffffful);
+}
+
+inline uint32_t NodeVersion(GraphId id) {
+ return static_cast<uint32_t>(id.handle >> 32);
+}
+
+struct Node {
+ int32_t rank; // rank number assigned by Pearce-Kelly algorithm
+ uint32_t version; // Current version number
+ int32_t next_hash; // Next entry in hash table
+ bool visited; // Temporary marker used by depth-first-search
+ uintptr_t masked_ptr; // User-supplied pointer
+ NodeSet in; // List of immediate predecessor nodes in graph
+ NodeSet out; // List of immediate successor nodes in graph
+ int priority; // Priority of recorded stack trace.
+ int nstack; // Depth of recorded stack trace.
+ void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
+};
+
+// Hash table for pointer to node index lookups.
+class PointerMap {
+ public:
+ explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
+ table_.fill(-1);
+ }
+
+ int32_t Find(void* ptr) {
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t i = table_[Hash(ptr)]; i != -1;) {
+ Node* n = (*nodes_)[i];
+ if (n->masked_ptr == masked) return i;
+ i = n->next_hash;
+ }
+ return -1;
+ }
+
+ void Add(void* ptr, int32_t i) {
+ int32_t* head = &table_[Hash(ptr)];
+ (*nodes_)[i]->next_hash = *head;
+ *head = i;
+ }
+
+ int32_t Remove(void* ptr) {
+ // Advance through linked list while keeping track of the
+ // predecessor slot that points to the current entry.
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
+ int32_t index = *slot;
+ Node* n = (*nodes_)[index];
+ if (n->masked_ptr == masked) {
+ *slot = n->next_hash; // Remove n from linked list
+ n->next_hash = -1;
+ return index;
+ }
+ slot = &n->next_hash;
+ }
+ return -1;
+ }
+
+ private:
+ // Number of buckets in hash table for pointer lookups.
+ static constexpr uint32_t kHashTableSize = 8171; // should be prime
+
+ const Vec<Node*>* nodes_;
+ std::array<int32_t, kHashTableSize> table_;
+
+ static uint32_t Hash(void* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
+ }
+};
+
+} // namespace
+
+struct GraphCycles::Rep {
+ Vec<Node*> nodes_;
+ Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
+ PointerMap ptrmap_;
+
+ // Temporary state.
+ Vec<int32_t> deltaf_; // Results of forward DFS
+ Vec<int32_t> deltab_; // Results of backward DFS
+ Vec<int32_t> list_; // All nodes to reprocess
+ Vec<int32_t> merged_; // Rank values to assign to list_ entries
+ Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
+
+ Rep() : ptrmap_(&nodes_) {}
+};
+
+static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
+ Node* n = rep->nodes_[NodeIndex(id)];
+ return (n->version == NodeVersion(id)) ? n : nullptr;
+}
+
+GraphCycles::GraphCycles() {
+ InitArenaIfNecessary();
+ rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
+ Rep;
+}
+
+GraphCycles::~GraphCycles() {
+ for (auto* node : rep_->nodes_) {
+ node->Node::~Node();
+ base_internal::LowLevelAlloc::Free(node);
+ }
+ rep_->Rep::~Rep();
+ base_internal::LowLevelAlloc::Free(rep_);
+}
+
+bool GraphCycles::CheckInvariants() const {
+ Rep* r = rep_;
+ NodeSet ranks; // Set of ranks seen so far.
+ for (uint32_t x = 0; x < r->nodes_.size(); x++) {
+ Node* nx = r->nodes_[x];
+ void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
+ if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ }
+ if (nx->visited) {
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ }
+ if (!ranks.insert(nx->rank)) {
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ }
+ HASH_FOR_EACH(y, nx->out) {
+ Node* ny = r->nodes_[y];
+ if (nx->rank >= ny->rank) {
+ ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
+ nx->rank, ny->rank);
+ }
+ }
+ }
+ return true;
+}
+
+GraphId GraphCycles::GetId(void* ptr) {
+ int32_t i = rep_->ptrmap_.Find(ptr);
+ if (i != -1) {
+ return MakeId(i, rep_->nodes_[i]->version);
+ } else if (rep_->free_nodes_.empty()) {
+ Node* n =
+ new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
+ Node;
+ n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
+ n->visited = false;
+ n->rank = rep_->nodes_.size();
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->nodes_.push_back(n);
+ rep_->ptrmap_.Add(ptr, n->rank);
+ return MakeId(n->rank, n->version);
+ } else {
+ // Preserve preceding rank since the set of ranks in use must be
+ // a permutation of [0,rep_->nodes_.size()-1].
+ int32_t r = rep_->free_nodes_.back();
+ rep_->free_nodes_.pop_back();
+ Node* n = rep_->nodes_[r];
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->ptrmap_.Add(ptr, r);
+ return MakeId(r, n->version);
+ }
+}
+
+void GraphCycles::RemoveNode(void* ptr) {
+ int32_t i = rep_->ptrmap_.Remove(ptr);
+ if (i == -1) {
+ return;
+ }
+ Node* x = rep_->nodes_[i];
+ HASH_FOR_EACH(y, x->out) {
+ rep_->nodes_[y]->in.erase(i);
+ }
+ HASH_FOR_EACH(y, x->in) {
+ rep_->nodes_[y]->out.erase(i);
+ }
+ x->in.clear();
+ x->out.clear();
+ x->masked_ptr = base_internal::HidePtr<void>(nullptr);
+ if (x->version == std::numeric_limits<uint32_t>::max()) {
+ // Cannot use x any more
+ } else {
+ x->version++; // Invalidates all copies of node.
+ rep_->free_nodes_.push_back(i);
+ }
+}
+
+void* GraphCycles::Ptr(GraphId id) {
+ Node* n = FindNode(rep_, id);
+ return n == nullptr ? nullptr
+ : base_internal::UnhidePtr<void>(n->masked_ptr);
+}
+
+bool GraphCycles::HasNode(GraphId node) {
+ return FindNode(rep_, node) != nullptr;
+}
+
+bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
+ Node* xn = FindNode(rep_, x);
+ return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
+}
+
+void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
+ Node* xn = FindNode(rep_, x);
+ Node* yn = FindNode(rep_, y);
+ if (xn && yn) {
+ xn->out.erase(NodeIndex(y));
+ yn->in.erase(NodeIndex(x));
+ // No need to update the rank assignment since a previous valid
+ // rank assignment remains valid after an edge deletion.
+ }
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
+static void Reorder(GraphCycles::Rep* r);
+static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
+
+bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
+ Rep* r = rep_;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+ Node* nx = FindNode(r, idx);
+ Node* ny = FindNode(r, idy);
+ if (nx == nullptr || ny == nullptr) return true; // Expired ids
+
+ if (nx == ny) return false; // Self edge
+ if (!nx->out.insert(y)) {
+ // Edge already exists.
+ return true;
+ }
+
+ ny->in.insert(x);
+
+ if (nx->rank <= ny->rank) {
+ // New edge is consistent with existing rank assignment.
+ return true;
+ }
+
+ // Current rank assignments are incompatible with the new edge. Recompute.
+ // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
+ if (!ForwardDFS(r, y, nx->rank)) {
+ // Found a cycle. Undo the insertion and tell caller.
+ nx->out.erase(y);
+ ny->in.erase(x);
+ // Since we do not call Reorder() on this path, clear any visited
+ // markers left by ForwardDFS.
+ for (const auto& d : r->deltaf_) {
+ r->nodes_[d]->visited = false;
+ }
+ return false;
+ }
+ BackwardDFS(r, x, ny->rank);
+ Reorder(r);
+ return true;
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
+ // Avoid recursion since stack space might be limited.
+ // We instead keep a stack of nodes to visit.
+ r->deltaf_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltaf_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->out) {
+ Node* nw = r->nodes_[w];
+ if (nw->rank == upper_bound) {
+ return false; // Cycle
+ }
+ if (!nw->visited && nw->rank < upper_bound) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+ return true;
+}
+
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
+ r->deltab_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltab_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->in) {
+ Node* nw = r->nodes_[w];
+ if (!nw->visited && lower_bound < nw->rank) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+}
+
+static void Reorder(GraphCycles::Rep* r) {
+ Sort(r->nodes_, &r->deltab_);
+ Sort(r->nodes_, &r->deltaf_);
+
+ // Adds contents of delta lists to list_ (backwards deltas first).
+ r->list_.clear();
+ MoveToList(r, &r->deltab_, &r->list_);
+ MoveToList(r, &r->deltaf_, &r->list_);
+
+ // Produce sorted list of all ranks that will be reassigned.
+ r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
+ std::merge(r->deltab_.begin(), r->deltab_.end(),
+ r->deltaf_.begin(), r->deltaf_.end(),
+ r->merged_.begin());
+
+ // Assign the ranks in order to the collected list.
+ for (uint32_t i = 0; i < r->list_.size(); i++) {
+ r->nodes_[r->list_[i]]->rank = r->merged_[i];
+ }
+}
+
+static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
+ struct ByRank {
+ const Vec<Node*>* nodes;
+ bool operator()(int32_t a, int32_t b) const {
+ return (*nodes)[a]->rank < (*nodes)[b]->rank;
+ }
+ };
+ ByRank cmp;
+ cmp.nodes = &nodes;
+ std::sort(delta->begin(), delta->end(), cmp);
+}
+
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
+ for (auto& v : *src) {
+ int32_t w = v;
+ v = r->nodes_[w]->rank; // Replace v entry with its rank
+ r->nodes_[w]->visited = false; // Prepare for future DFS calls
+ dst->push_back(w);
+ }
+}
+
+int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
+ GraphId path[]) const {
+ Rep* r = rep_;
+ if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+
+ // Forward depth first search starting at x until we hit y.
+ // As we descend into a node, we push it onto the path.
+ // As we leave a node, we remove it from the path.
+ int path_len = 0;
+
+ NodeSet seen;
+ r->stack_.clear();
+ r->stack_.push_back(x);
+ while (!r->stack_.empty()) {
+ int32_t n = r->stack_.back();
+ r->stack_.pop_back();
+ if (n < 0) {
+ // Marker to indicate that we are leaving a node
+ path_len--;
+ continue;
+ }
+
+ if (path_len < max_path_len) {
+ path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+ }
+ path_len++;
+ r->stack_.push_back(-1); // Will remove tentative path entry
+
+ if (n == y) {
+ return path_len;
+ }
+
+ HASH_FOR_EACH(w, r->nodes_[n]->out) {
+ if (seen.insert(w)) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+
+ return 0;
+}
+
+bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
+ return FindPath(x, y, 0, nullptr) > 0;
+}
+
+void GraphCycles::UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void** stack, int)) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr || n->priority >= priority) {
+ return;
+ }
+ n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
+ n->priority = priority;
+}
+
+int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr) {
+ *ptr = nullptr;
+ return 0;
+ } else {
+ *ptr = n->stack;
+ return n->nstack;
+ }
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
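
For orientation between the two file diffs: the GraphId scheme in graphcycles.cc packs a node's slot index and a version counter into one 64-bit handle, and RemoveNode() bumps the version so every outstanding copy of the id goes stale. Below is a minimal standalone sketch of that encoding; the Demo* names are hypothetical and not the Abseil-internal types.

#include <cassert>
#include <cstdint>

struct DemoId { uint64_t handle; };

// Low 32 bits: slot index. High 32 bits: version counter.
inline DemoId DemoMakeId(int32_t index, uint32_t version) {
  return DemoId{(static_cast<uint64_t>(version) << 32) |
                static_cast<uint32_t>(index)};
}
inline int32_t DemoIndex(DemoId id) {
  return static_cast<int32_t>(id.handle & 0xffffffffu);
}
inline uint32_t DemoVersion(DemoId id) {
  return static_cast<uint32_t>(id.handle >> 32);
}

int main() {
  DemoId stale = DemoMakeId(/*index=*/7, /*version=*/1);
  DemoId fresh = DemoMakeId(/*index=*/7, /*version=*/2);  // slot 7 reused
  assert(DemoIndex(stale) == 7 && DemoVersion(stale) == 1);
  assert(stale.handle != fresh.handle);  // the old handle no longer matches
  return 0;
}
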
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
index 14af15ce00..ceba33e4de 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
@@ -1,141 +1,141 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-
-// GraphCycles detects the introduction of a cycle into a directed
-// graph that is being built up incrementally.
-//
-// Nodes are identified by small integers. It is not possible to
-// record multiple edges with the same (source, destination) pair;
-// requests to add an edge where one already exists are silently
-// ignored.
-//
-// It is also not possible to introduce a cycle; an attempt to insert
-// an edge that would introduce a cycle fails and returns false.
-//
-// GraphCycles uses no internal locking; calls into it should be
-// serialized externally.
-
-// Performance considerations:
-// Works well on sparse graphs, poorly on dense graphs.
-// Extra information is maintained incrementally to detect cycles quickly.
-// InsertEdge() is very fast when the edge already exists, and reasonably fast
-// otherwise.
-// FindPath() is linear in the size of the graph.
-// The current implementation uses O(|V|+|E|) space.
-
-#include <cstdint>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+
+// GraphCycles detects the introduction of a cycle into a directed
+// graph that is being built up incrementally.
+//
+// Nodes are identified by small integers. It is not possible to
+// record multiple edges with the same (source, destination) pair;
+// requests to add an edge where one already exists are silently
+// ignored.
+//
+// It is also not possible to introduce a cycle; an attempt to insert
+// an edge that would introduce a cycle fails and returns false.
+//
+// GraphCycles uses no internal locking; calls into it should be
+// serialized externally.
+
+// Performance considerations:
+// Works well on sparse graphs, poorly on dense graphs.
+// Extra information is maintained incrementally to detect cycles quickly.
+// InsertEdge() is very fast when the edge already exists, and reasonably fast
+// otherwise.
+// FindPath() is linear in the size of the graph.
+// The current implementation uses O(|V|+|E|) space.
+
+#include <cstdint>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Opaque identifier for a graph node.
-struct GraphId {
- uint64_t handle;
-
- bool operator==(const GraphId& x) const { return handle == x.handle; }
- bool operator!=(const GraphId& x) const { return handle != x.handle; }
-};
-
-// Return an invalid graph id that will never be assigned by GraphCycles.
-inline GraphId InvalidGraphId() {
- return GraphId{0};
-}
-
-class GraphCycles {
- public:
- GraphCycles();
- ~GraphCycles();
-
- // Return the id to use for ptr, assigning one if necessary.
- // Subsequent calls with the same ptr value will return the same id
- // until Remove().
- GraphId GetId(void* ptr);
-
- // Remove "ptr" from the graph. Its corresponding node and all
- // edges to and from it are removed.
- void RemoveNode(void* ptr);
-
- // Return the pointer associated with id, or nullptr if id is not
- // currently in the graph.
- void* Ptr(GraphId id);
-
- // Attempt to insert an edge from source_node to dest_node. If the
- // edge would introduce a cycle, return false without making any
- // changes. Otherwise add the edge and return true.
- bool InsertEdge(GraphId source_node, GraphId dest_node);
-
- // Remove any edge that exists from source_node to dest_node.
- void RemoveEdge(GraphId source_node, GraphId dest_node);
-
- // Return whether node exists in the graph.
- bool HasNode(GraphId node);
-
- // Return whether there is an edge directly from source_node to dest_node.
- bool HasEdge(GraphId source_node, GraphId dest_node) const;
-
- // Return whether dest_node is reachable from source_node
- // by following edges.
- bool IsReachable(GraphId source_node, GraphId dest_node) const;
-
- // Find a path from "source" to "dest". If such a path exists,
- // place the nodes on the path in the array path[], and return
- // the number of nodes on the path. If the path is longer than
- // max_path_len nodes, only the first max_path_len nodes are placed
- // in path[]. The client should compare the return value with
- // max_path_len" to see when this occurs. If no path exists, return
- // 0. Any valid path stored in path[] will start with "source" and
- // end with "dest". There is no guarantee that the path is the
- // shortest, but no node will appear twice in the path, except the
- // source and destination node if they are identical; therefore, the
- // return value is at most one greater than the number of nodes in
- // the graph.
- int FindPath(GraphId source, GraphId dest, int max_path_len,
- GraphId path[]) const;
-
- // Update the stack trace recorded for id with the current stack
- // trace if the last time it was updated had a smaller priority
- // than the priority passed on this call.
- //
- // *get_stack_trace is called to get the stack trace.
- void UpdateStackTrace(GraphId id, int priority,
- int (*get_stack_trace)(void**, int));
-
- // Set *ptr to the beginning of the array that holds the recorded
- // stack trace for id and return the depth of the stack trace.
- int GetStackTrace(GraphId id, void*** ptr);
-
- // Check internal invariants. Crashes on failure, returns true on success.
- // Expensive: should only be called from graphcycles_test.cc.
- bool CheckInvariants() const;
-
- // ----------------------------------------------------
- struct Rep;
- private:
- Rep *rep_; // opaque representation
- GraphCycles(const GraphCycles&) = delete;
- GraphCycles& operator=(const GraphCycles&) = delete;
-};
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// Opaque identifier for a graph node.
+struct GraphId {
+ uint64_t handle;
+
+ bool operator==(const GraphId& x) const { return handle == x.handle; }
+ bool operator!=(const GraphId& x) const { return handle != x.handle; }
+};
+
+// Return an invalid graph id that will never be assigned by GraphCycles.
+inline GraphId InvalidGraphId() {
+ return GraphId{0};
+}
+
+class GraphCycles {
+ public:
+ GraphCycles();
+ ~GraphCycles();
+
+ // Return the id to use for ptr, assigning one if necessary.
+ // Subsequent calls with the same ptr value will return the same id
+ // until Remove().
+ GraphId GetId(void* ptr);
+
+ // Remove "ptr" from the graph. Its corresponding node and all
+ // edges to and from it are removed.
+ void RemoveNode(void* ptr);
+
+ // Return the pointer associated with id, or nullptr if id is not
+ // currently in the graph.
+ void* Ptr(GraphId id);
+
+ // Attempt to insert an edge from source_node to dest_node. If the
+ // edge would introduce a cycle, return false without making any
+ // changes. Otherwise add the edge and return true.
+ bool InsertEdge(GraphId source_node, GraphId dest_node);
+
+ // Remove any edge that exists from source_node to dest_node.
+ void RemoveEdge(GraphId source_node, GraphId dest_node);
+
+ // Return whether node exists in the graph.
+ bool HasNode(GraphId node);
+
+ // Return whether there is an edge directly from source_node to dest_node.
+ bool HasEdge(GraphId source_node, GraphId dest_node) const;
+
+ // Return whether dest_node is reachable from source_node
+ // by following edges.
+ bool IsReachable(GraphId source_node, GraphId dest_node) const;
+
+ // Find a path from "source" to "dest". If such a path exists,
+ // place the nodes on the path in the array path[], and return
+ // the number of nodes on the path. If the path is longer than
+ // max_path_len nodes, only the first max_path_len nodes are placed
+ // in path[]. The client should compare the return value with
+ // max_path_len" to see when this occurs. If no path exists, return
+ // 0. Any valid path stored in path[] will start with "source" and
+ // end with "dest". There is no guarantee that the path is the
+ // shortest, but no node will appear twice in the path, except the
+ // source and destination node if they are identical; therefore, the
+ // return value is at most one greater than the number of nodes in
+ // the graph.
+ int FindPath(GraphId source, GraphId dest, int max_path_len,
+ GraphId path[]) const;
+
+ // Update the stack trace recorded for id with the current stack
+ // trace if the last time it was updated had a smaller priority
+ // than the priority passed on this call.
+ //
+ // *get_stack_trace is called to get the stack trace.
+ void UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void**, int));
+
+ // Set *ptr to the beginning of the array that holds the recorded
+ // stack trace for id and return the depth of the stack trace.
+ int GetStackTrace(GraphId id, void*** ptr);
+
+ // Check internal invariants. Crashes on failure, returns true on success.
+ // Expensive: should only be called from graphcycles_test.cc.
+ bool CheckInvariants() const;
+
+ // ----------------------------------------------------
+ struct Rep;
+ private:
+ Rep *rep_; // opaque representation
+ GraphCycles(const GraphCycles&) = delete;
+ GraphCycles& operator=(const GraphCycles&) = delete;
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif
+} // namespace absl
+
+#endif
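
As a usage sketch of the interface declared above (internal-only in Abseil; the lock objects here are hypothetical stand-ins, and calls must be serialized externally as the header notes):

#include "absl/synchronization/internal/graphcycles.h"

using absl::synchronization_internal::GraphCycles;
using absl::synchronization_internal::GraphId;

static int lock_a, lock_b;  // any distinct addresses work as node keys

bool DemoCycleDetection() {
  GraphCycles graph;
  GraphId a = graph.GetId(&lock_a);
  GraphId b = graph.GetId(&lock_b);
  bool first = graph.InsertEdge(a, b);      // a -> b: no cycle, returns true
  bool second = graph.InsertEdge(b, a);     // b -> a would close a cycle: false
  GraphId path[4];
  int len = graph.FindPath(a, b, 4, path);  // returns 2: {a, b}
  graph.RemoveNode(&lock_b);                // drops b and its edges
  return first && !second && len == 2;
}
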
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
index 5714fdb05c..bbd4d2d70f 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
@@ -1,130 +1,130 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls.)
-// Constructible from an absl::Time (for a timeout to be respected) or {}
-// (for "no timeout".)
-// This is a private low-level API for use by a handful of low-level
-// components that are friends of this class. Higher-level components
-// should build APIs based on absl::Time and absl::Duration.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-
-#include <time.h>
-
-#include <algorithm>
-#include <limits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// An optional absolute timeout, with nanosecond granularity,
+// compatible with absl::Time. Suitable for in-register
+// parameter-passing (e.g. syscalls.)
+// Constructible from an absl::Time (for a timeout to be respected) or {}
+// (for "no timeout".)
+// This is a private low-level API for use by a handful of low-level
+// components that are friends of this class. Higher-level components
+// should build APIs based on absl::Time and absl::Duration.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+
+#include <time.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-class Futex;
-class Waiter;
-
-class KernelTimeout {
- public:
- // A timeout that should expire at <t>. Any value, in the full
- // InfinitePast() to InfiniteFuture() range, is valid here and will be
- // respected.
- explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
- // No timeout.
- KernelTimeout() : ns_(0) {}
-
- // A more explicit factory for those who prefer it. Equivalent to {}.
- static KernelTimeout Never() { return {}; }
-
- // We explicitly do not support other custom formats: timespec, int64_t nanos.
- // Unify on this and absl::Time, please.
-
- bool has_timeout() const { return ns_ != 0; }
-
+namespace synchronization_internal {
+
+class Futex;
+class Waiter;
+
+class KernelTimeout {
+ public:
+ // A timeout that should expire at <t>. Any value, in the full
+ // InfinitePast() to InfiniteFuture() range, is valid here and will be
+ // respected.
+ explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
+ // No timeout.
+ KernelTimeout() : ns_(0) {}
+
+ // A more explicit factory for those who prefer it. Equivalent to {}.
+ static KernelTimeout Never() { return {}; }
+
+ // We explicitly do not support other custom formats: timespec, int64_t nanos.
+ // Unify on this and absl::Time, please.
+
+ bool has_timeout() const { return ns_ != 0; }
+
// Convert to parameter for sem_timedwait/futex/similar. Only for approved
// users. Do not call if !has_timeout.
struct timespec MakeAbsTimespec();
- private:
- // internal rep, not user visible: ns after unix epoch.
- // zero = no timeout.
- // Negative we treat as an unlikely (and certainly expired!) but valid
- // timeout.
- int64_t ns_;
-
- static int64_t MakeNs(absl::Time t) {
- // optimization--InfiniteFuture is common "no timeout" value
- // and cheaper to compare than convert.
- if (t == absl::InfiniteFuture()) return 0;
- int64_t x = ToUnixNanos(t);
-
- // A timeout that lands exactly on the epoch (x=0) needs to be respected,
- // so we alter it unnoticably to 1. Negative timeouts are in
- // theory supported, but handled poorly by the kernel (long
- // delays) so push them forward too; since all such times have
- // already passed, it's indistinguishable.
- if (x <= 0) x = 1;
- // A time larger than what can be represented to the kernel is treated
- // as no timeout.
- if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
- return x;
- }
-
-#ifdef _WIN32
- // Converts to milliseconds from now, or INFINITE when
- // !has_timeout(). For use by SleepConditionVariableSRW on
- // Windows. Callers should recognize that the return value is a
- // relative duration (it should be recomputed by calling this method
- // in the case of a spurious wakeup).
- // This header file may be included transitively by public header files,
- // so we define our own DWORD and INFINITE instead of getting them from
- // <intsafe.h> and <WinBase.h>.
- typedef unsigned long DWord; // NOLINT
- DWord InMillisecondsFromNow() const {
- constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
- if (!has_timeout()) {
- return kInfinite;
- }
- // The use of absl::Now() to convert from absolute time to
- // relative time means that absl::Now() cannot use anything that
- // depends on KernelTimeout (for example, Mutex) on Windows.
- int64_t now = ToUnixNanos(absl::Now());
- if (ns_ >= now) {
- // Round up so that Now() + ms_from_now >= ns_.
- constexpr uint64_t max_nanos =
- (std::numeric_limits<int64_t>::max)() - 999999u;
- uint64_t ms_from_now =
- (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
- }
- return 0;
- }
-#endif
-
- friend class Futex;
- friend class Waiter;
-};
-
+ private:
+ // internal rep, not user visible: ns after unix epoch.
+ // zero = no timeout.
+ // Negative we treat as an unlikely (and certainly expired!) but valid
+ // timeout.
+ int64_t ns_;
+
+ static int64_t MakeNs(absl::Time t) {
+ // optimization--InfiniteFuture is common "no timeout" value
+ // and cheaper to compare than convert.
+ if (t == absl::InfiniteFuture()) return 0;
+ int64_t x = ToUnixNanos(t);
+
+ // A timeout that lands exactly on the epoch (x=0) needs to be respected,
+    // so we alter it unnoticeably to 1. Negative timeouts are in
+ // theory supported, but handled poorly by the kernel (long
+ // delays) so push them forward too; since all such times have
+ // already passed, it's indistinguishable.
+ if (x <= 0) x = 1;
+ // A time larger than what can be represented to the kernel is treated
+ // as no timeout.
+ if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
+ return x;
+ }
+
+#ifdef _WIN32
+ // Converts to milliseconds from now, or INFINITE when
+ // !has_timeout(). For use by SleepConditionVariableSRW on
+ // Windows. Callers should recognize that the return value is a
+ // relative duration (it should be recomputed by calling this method
+ // in the case of a spurious wakeup).
+ // This header file may be included transitively by public header files,
+ // so we define our own DWORD and INFINITE instead of getting them from
+ // <intsafe.h> and <WinBase.h>.
+ typedef unsigned long DWord; // NOLINT
+ DWord InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+ // The use of absl::Now() to convert from absolute time to
+ // relative time means that absl::Now() cannot use anything that
+ // depends on KernelTimeout (for example, Mutex) on Windows.
+ int64_t now = ToUnixNanos(absl::Now());
+ if (ns_ >= now) {
+ // Round up so that Now() + ms_from_now >= ns_.
+ constexpr uint64_t max_nanos =
+ (std::numeric_limits<int64_t>::max)() - 999999u;
+ uint64_t ms_from_now =
+ (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+ }
+ return 0;
+ }
+#endif
+
+ friend class Futex;
+ friend class Waiter;
+};
+
inline struct timespec KernelTimeout::MakeAbsTimespec() {
int64_t n = ns_;
static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
@@ -149,8 +149,8 @@ inline struct timespec KernelTimeout::MakeAbsTimespec() {
return abstime;
}
-} // namespace synchronization_internal
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
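
For orientation, a hedged sketch of how the friend classes use this type; ordinary code should stick to absl::Time and absl::Duration, as the header comment says:

#include <ctime>

#include "absl/synchronization/internal/kernel_timeout.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

using absl::synchronization_internal::KernelTimeout;

void DemoKernelTimeout() {
  // Absolute deadline 50ms from now; any absl::Time value is accepted.
  KernelTimeout deadline(absl::Now() + absl::Milliseconds(50));
  // "No timeout": has_timeout() is false and waits may block indefinitely.
  KernelTimeout never = KernelTimeout::Never();

  if (deadline.has_timeout()) {
    // Only legal when has_timeout(); feeds sem_timedwait()/futex-style calls.
    struct timespec ts = deadline.MakeAbsTimespec();
    (void)ts;
  }
  (void)never;
}
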
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
index 16fc09ef86..a6031787e0 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
@@ -1,106 +1,106 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/waiter.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/waiter.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
- base_internal::ThreadIdentity *identity;
- identity = GetOrCreateCurrentThreadIdentity();
- identity->blocked_count_ptr = counter;
-}
-
-std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
- base_internal::ThreadIdentity *identity;
- identity = GetOrCreateCurrentThreadIdentity();
- return identity->blocked_count_ptr;
-}
-
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
- new (Waiter::GetWaiter(identity)) Waiter();
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
- Waiter::GetWaiter(identity)->~Waiter();
-}
-
-void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
- const int ticker =
- identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
- // Wakeup the waiting thread since it is time for it to become idle.
- Waiter::GetWaiter(identity)->Poke();
- }
-}
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ identity->blocked_count_ptr = counter;
+}
+
+std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ return identity->blocked_count_ptr;
+}
+
+void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
+ new (Waiter::GetWaiter(identity)) Waiter();
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+}
+
+void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
+ Waiter::GetWaiter(identity)->~Waiter();
+}
+
+void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
+ const int ticker =
+ identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
+    // Wake up the waiting thread since it is time for it to become idle.
+ Waiter::GetWaiter(identity)->Poke();
+ }
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-extern "C" {
-
+} // namespace absl
+
+extern "C" {
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
- absl::base_internal::ThreadIdentity *identity) {
- absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
-}
-
+ absl::base_internal::ThreadIdentity *identity) {
+ absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
+}
+
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
- absl::synchronization_internal::KernelTimeout t) {
- bool timeout = false;
- absl::base_internal::ThreadIdentity *identity;
- identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
-
- // Ensure wait_start != 0.
- int ticker = identity->ticker.load(std::memory_order_relaxed);
- identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-
- if (identity->blocked_count_ptr != nullptr) {
- // Increment count of threads blocked in a given thread pool.
- identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
- }
-
- timeout =
- !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
-
- if (identity->blocked_count_ptr != nullptr) {
- identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
- }
-
- identity->is_idle.store(false, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- return !timeout;
-}
-
-} // extern "C"
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+ absl::synchronization_internal::KernelTimeout t) {
+ bool timeout = false;
+ absl::base_internal::ThreadIdentity *identity;
+ identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+
+ // Ensure wait_start != 0.
+ int ticker = identity->ticker.load(std::memory_order_relaxed);
+ identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ // Increment count of threads blocked in a given thread pool.
+ identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ timeout =
+ !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ return !timeout;
+}
+
+} // extern "C"
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
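
The Tick() path above reduces to simple counter arithmetic: the wait path records the ticker value before blocking, and the once-per-second ticker pokes the waiter after enough periods have passed. A standalone sketch of that predicate follows; kIdlePeriods here is a made-up illustrative constant, the real one lives in Waiter.

#include <cassert>

constexpr int kIdlePeriods = 60;  // illustrative only; see Waiter::kIdlePeriods

// wait_start == 0 means "not currently waiting"; the wait path stores the
// current ticker (or 1 if the ticker is still 0) before blocking.
bool ShouldPokeForIdle(int ticker, int wait_start, bool is_idle) {
  return wait_start != 0 && (ticker - wait_start > kIdlePeriods) && !is_idle;
}

int main() {
  assert(!ShouldPokeForIdle(/*ticker=*/10, /*wait_start=*/0, false));   // not waiting
  assert(!ShouldPokeForIdle(/*ticker=*/30, /*wait_start=*/10, false));  // not idle yet
  assert(ShouldPokeForIdle(/*ticker=*/100, /*wait_start=*/10, false));  // time to poke
  assert(!ShouldPokeForIdle(/*ticker=*/100, /*wait_start=*/10, true));  // already idle
  return 0;
}
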
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
index 25187fcb98..7beae8ef1d 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
@@ -1,115 +1,115 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// PerThreadSem is a low-level synchronization primitive controlling the
-// runnability of a single thread, used internally by Mutex and CondVar.
-//
-// This is NOT a general-purpose synchronization mechanism, and should not be
-// used directly by applications. Applications should use Mutex and CondVar.
-//
-// The semantics of PerThreadSem are the same as that of a counting semaphore.
-// Each thread maintains an abstract "count" value associated with its identity.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-
-#include <atomic>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/create_thread_identity.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// PerThreadSem is a low-level synchronization primitive controlling the
+// runnability of a single thread, used internally by Mutex and CondVar.
+//
+// This is NOT a general-purpose synchronization mechanism, and should not be
+// used directly by applications. Applications should use Mutex and CondVar.
+//
+// The semantics of PerThreadSem are the same as that of a counting semaphore.
+// Each thread maintains an abstract "count" value associated with its identity.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+
+#include <atomic>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-class Mutex;
-
-namespace synchronization_internal {
-
-class PerThreadSem {
- public:
- PerThreadSem() = delete;
- PerThreadSem(const PerThreadSem&) = delete;
- PerThreadSem& operator=(const PerThreadSem&) = delete;
-
- // Routine invoked periodically (once a second) by a background thread.
- // Has no effect on user-visible state.
- static void Tick(base_internal::ThreadIdentity* identity);
-
- // ---------------------------------------------------------------------------
- // Routines used by autosizing threadpools to detect when threads are
- // blocked. Each thread has a counter pointer, initially zero. If non-zero,
- // the implementation atomically increments the counter when it blocks on a
- // semaphore, a decrements it again when it wakes. This allows a threadpool
- // to keep track of how many of its threads are blocked.
- // SetThreadBlockedCounter() should be used only by threadpool
- // implementations. GetThreadBlockedCounter() should be used by modules that
- // block threads; if the pointer returned is non-zero, the location should be
- // incremented before the thread blocks, and decremented after it wakes.
- static void SetThreadBlockedCounter(std::atomic<int> *counter);
- static std::atomic<int> *GetThreadBlockedCounter();
-
- private:
- // Create the PerThreadSem associated with "identity". Initializes count=0.
- // REQUIRES: May only be called by ThreadIdentity.
- static void Init(base_internal::ThreadIdentity* identity);
-
- // Destroy the PerThreadSem associated with "identity".
- // REQUIRES: May only be called by ThreadIdentity.
- static void Destroy(base_internal::ThreadIdentity* identity);
-
- // Increments "identity"'s count.
- static inline void Post(base_internal::ThreadIdentity* identity);
-
- // Waits until either our count > 0 or t has expired.
- // If count > 0, decrements count and returns true. Otherwise returns false.
- // !t.has_timeout() => Wait(t) will return true.
- static inline bool Wait(KernelTimeout t);
-
+
+class Mutex;
+
+namespace synchronization_internal {
+
+class PerThreadSem {
+ public:
+ PerThreadSem() = delete;
+ PerThreadSem(const PerThreadSem&) = delete;
+ PerThreadSem& operator=(const PerThreadSem&) = delete;
+
+ // Routine invoked periodically (once a second) by a background thread.
+ // Has no effect on user-visible state.
+ static void Tick(base_internal::ThreadIdentity* identity);
+
+ // ---------------------------------------------------------------------------
+ // Routines used by autosizing threadpools to detect when threads are
+ // blocked. Each thread has a counter pointer, initially zero. If non-zero,
+ // the implementation atomically increments the counter when it blocks on a
+  // semaphore, and decrements it again when it wakes. This allows a threadpool
+ // to keep track of how many of its threads are blocked.
+ // SetThreadBlockedCounter() should be used only by threadpool
+ // implementations. GetThreadBlockedCounter() should be used by modules that
+ // block threads; if the pointer returned is non-zero, the location should be
+ // incremented before the thread blocks, and decremented after it wakes.
+ static void SetThreadBlockedCounter(std::atomic<int> *counter);
+ static std::atomic<int> *GetThreadBlockedCounter();
+
+ private:
+ // Create the PerThreadSem associated with "identity". Initializes count=0.
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Init(base_internal::ThreadIdentity* identity);
+
+ // Destroy the PerThreadSem associated with "identity".
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Destroy(base_internal::ThreadIdentity* identity);
+
+ // Increments "identity"'s count.
+ static inline void Post(base_internal::ThreadIdentity* identity);
+
+ // Waits until either our count > 0 or t has expired.
+ // If count > 0, decrements count and returns true. Otherwise returns false.
+ // !t.has_timeout() => Wait(t) will return true.
+ static inline bool Wait(KernelTimeout t);
+
// Permitted callers.
- friend class PerThreadSemTest;
- friend class absl::Mutex;
- friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
- friend void ReclaimThreadIdentity(void* v);
-};
-
-} // namespace synchronization_internal
+ friend class PerThreadSemTest;
+ friend class absl::Mutex;
+ friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
+ friend void ReclaimThreadIdentity(void* v);
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker. This causes it to flag weak symbol overrides as ODR
-// violations. Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
- absl::base_internal::ThreadIdentity* identity);
+ absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
- absl::synchronization_internal::KernelTimeout t);
-} // extern "C"
-
-void absl::synchronization_internal::PerThreadSem::Post(
- absl::base_internal::ThreadIdentity* identity) {
+ absl::synchronization_internal::KernelTimeout t);
+} // extern "C"
+
+void absl::synchronization_internal::PerThreadSem::Post(
+ absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
-}
-
-bool absl::synchronization_internal::PerThreadSem::Wait(
- absl::synchronization_internal::KernelTimeout t) {
+}
+
+bool absl::synchronization_internal::PerThreadSem::Wait(
+ absl::synchronization_internal::KernelTimeout t) {
return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
-}
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+}
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
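
The blocked-counter protocol documented in per_thread_sem.h above can be sketched as follows. This is a minimal illustration, not Abseil code: PoolStats, RegisterWorker and BlockObservably are hypothetical names, and PerThreadSem is an internal API, so real callers would live inside the Abseil tree or a threadpool implementation that ships with it.

#include <atomic>

#include "absl/synchronization/internal/per_thread_sem.h"

// Hypothetical per-pool bookkeeping; not part of Abseil.
struct PoolStats {
  std::atomic<int> blocked_threads{0};
};

// Called once at the start of each worker thread so that blocking inside
// Abseil synchronization primitives is counted against this pool.
void RegisterWorker(PoolStats* stats) {
  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
      &stats->blocked_threads);
}

// A module that blocks threads by some other means can honor the same
// protocol: increment the counter before blocking, decrement it after waking.
void BlockObservably(void (*block_fn)()) {
  std::atomic<int>* counter =
      absl::synchronization_internal::PerThreadSem::GetThreadBlockedCounter();
  if (counter != nullptr) counter->fetch_add(1, std::memory_order_relaxed);
  block_fn();
  if (counter != nullptr) counter->fetch_sub(1, std::memory_order_relaxed);
}
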
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
index 78447e001a..0cb96dacde 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
@@ -1,93 +1,93 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-
-#include <cassert>
-#include <cstddef>
-#include <functional>
-#include <queue>
-#include <thread> // NOLINT(build/c++11)
-#include <vector>
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <queue>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// A simple ThreadPool implementation for tests.
-class ThreadPool {
- public:
- explicit ThreadPool(int num_threads) {
- for (int i = 0; i < num_threads; ++i) {
- threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
- }
- }
-
- ThreadPool(const ThreadPool &) = delete;
- ThreadPool &operator=(const ThreadPool &) = delete;
-
- ~ThreadPool() {
- {
- absl::MutexLock l(&mu_);
- for (size_t i = 0; i < threads_.size(); i++) {
- queue_.push(nullptr); // Shutdown signal.
- }
- }
- for (auto &t : threads_) {
- t.join();
- }
- }
-
- // Schedule a function to be run on a ThreadPool thread immediately.
- void Schedule(std::function<void()> func) {
- assert(func != nullptr);
- absl::MutexLock l(&mu_);
- queue_.push(std::move(func));
- }
-
- private:
- bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- return !queue_.empty();
- }
-
- void WorkLoop() {
- while (true) {
- std::function<void()> func;
- {
- absl::MutexLock l(&mu_);
- mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
- func = std::move(queue_.front());
- queue_.pop();
- }
- if (func == nullptr) { // Shutdown signal.
- break;
- }
- func();
- }
- }
-
- absl::Mutex mu_;
- std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
- std::vector<std::thread> threads_;
-};
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// A simple ThreadPool implementation for tests.
+class ThreadPool {
+ public:
+ explicit ThreadPool(int num_threads) {
+ for (int i = 0; i < num_threads; ++i) {
+ threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
+ }
+ }
+
+ ThreadPool(const ThreadPool &) = delete;
+ ThreadPool &operator=(const ThreadPool &) = delete;
+
+ ~ThreadPool() {
+ {
+ absl::MutexLock l(&mu_);
+ for (size_t i = 0; i < threads_.size(); i++) {
+ queue_.push(nullptr); // Shutdown signal.
+ }
+ }
+ for (auto &t : threads_) {
+ t.join();
+ }
+ }
+
+ // Schedule a function to be run on a ThreadPool thread immediately.
+ void Schedule(std::function<void()> func) {
+ assert(func != nullptr);
+ absl::MutexLock l(&mu_);
+ queue_.push(std::move(func));
+ }
+
+ private:
+ bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return !queue_.empty();
+ }
+
+ void WorkLoop() {
+ while (true) {
+ std::function<void()> func;
+ {
+ absl::MutexLock l(&mu_);
+ mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
+ func = std::move(queue_.front());
+ queue_.pop();
+ }
+ if (func == nullptr) { // Shutdown signal.
+ break;
+ }
+ func();
+ }
+ }
+
+ absl::Mutex mu_;
+ std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
+ std::vector<std::thread> threads_;
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
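
A minimal sketch of how a test might drive the ThreadPool above; RunTasks is an illustrative name, and the fan-out/fan-in uses absl::BlockingCounter rather than anything specific to this diff:

#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"

// Runs n independent tasks on four workers and waits for all of them.
void RunTasks(int n) {
  absl::synchronization_internal::ThreadPool pool(4);
  absl::BlockingCounter done(n);
  for (int i = 0; i < n; ++i) {
    pool.Schedule([&done] { done.DecrementCount(); });
  }
  done.Wait();  // All n tasks have executed.
  // ~ThreadPool() enqueues one nullptr per worker as a shutdown signal and
  // joins the threads.
}
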
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
index d68a525854..28ef311e4a 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
@@ -1,317 +1,317 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <new>
+#include <type_traits>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
- base_internal::ThreadIdentity *identity =
- base_internal::CurrentThreadIdentityIfPresent();
- assert(identity != nullptr);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- const int ticker = identity->ticker.load(std::memory_order_relaxed);
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
- identity->is_idle.store(true, std::memory_order_relaxed);
- }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
- // Loop until we can atomically decrement futex from a positive
- // value, waiting on a futex while we believe it is zero.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
-
- while (true) {
- int32_t x = futex_.load(std::memory_order_relaxed);
+namespace synchronization_internal {
+
+static void MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+
+Waiter::Waiter() {
+ futex_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
while (x != 0) {
- if (!futex_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- return true; // Consumed a wakeup, we are done.
- }
-
- if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
- if (err != 0) {
- if (err == -EINTR || err == -EWOULDBLOCK) {
- // Do nothing, the loop will retry.
- } else if (err == -ETIMEDOUT) {
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- if (futex_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- // Wake one thread waiting on the futex.
- const int err = Futex::Wake(&futex_, 1);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
- explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
- const int err = pthread_mutex_lock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
- }
- }
-
- PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
- PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
- ~PthreadMutexHolder() {
- const int err = pthread_mutex_unlock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
- }
- }
-
- private:
- pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
- const int err = pthread_mutex_init(&mu_, 0);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
- }
-
- const int err2 = pthread_cond_init(&cv_, 0);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
- }
-
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-Waiter::~Waiter() {
- const int err = pthread_mutex_destroy(&mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
- }
-
- const int err2 = pthread_cond_destroy(&cv_);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
- }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- PthreadMutexHolder h(&mu_);
- ++waiter_count_;
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!t.has_timeout()) {
- const int err = pthread_cond_wait(&cv_, &mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
- }
- } else {
- const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
- if (err == ETIMEDOUT) {
- --waiter_count_;
- return false;
- }
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- PthreadMutexHolder h(&mu_);
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- PthreadMutexHolder h(&mu_);
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- const int err = pthread_cond_signal(&cv_);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
- }
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
- if (sem_init(&sem_, 0, 0) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
- }
- wakeups_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() {
- if (sem_destroy(&sem_) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
- }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- // Loop until we timeout or consume a wakeup.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (true) {
- int x = wakeups_.load(std::memory_order_relaxed);
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = Futex::WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+
+Waiter::Waiter() {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+Waiter::~Waiter() {
+ const int err = pthread_mutex_destroy(&mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_destroy(&cv_);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+
+Waiter::Waiter() {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+ wakeups_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() {
+ if (sem_destroy(&sem_) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ // Loop until we timeout or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
while (x != 0) {
- if (!wakeups_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- // Successfully consumed a wakeup, we're done.
- return true;
- }
-
- if (!first_pass) MaybeBecomeIdle();
- // Nothing to consume, wait (looping on EINTR).
- while (true) {
- if (!t.has_timeout()) {
- if (sem_wait(&sem_) == 0) break;
- if (errno == EINTR) continue;
- ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
- } else {
- if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
- if (errno == EINTR) continue;
- if (errno == ETIMEDOUT) return false;
- ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- // Post a wakeup.
- if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
- ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
- static SRWLOCK *GetLock(Waiter *w) {
- return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
- }
-
- static CONDITION_VARIABLE *GetCond(Waiter *w) {
- return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
- }
-
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+
+class Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
static_assert(sizeof(SRWLOCK) == sizeof(void *),
"`mu_storage_` does not have the same size as SRWLOCK");
static_assert(alignof(SRWLOCK) == alignof(void *),
@@ -320,109 +320,109 @@ class Waiter::WinHelper {
static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
"`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
"as `CONDITION_VARIABLE`");
- static_assert(
+ static_assert(
alignof(CONDITION_VARIABLE) == alignof(void *),
"`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
- // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
- // and destructible because we never call their constructors or destructors.
- static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
"The `SRWLOCK` type must be trivially constructible");
static_assert(
std::is_trivially_constructible<CONDITION_VARIABLE>::value,
"The `CONDITION_VARIABLE` type must be trivially constructible");
- static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
"The `SRWLOCK` type must be trivially destructible");
- static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
"The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
- explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
- AcquireSRWLockExclusive(mu_);
- }
-
- LockHolder(const LockHolder&) = delete;
- LockHolder& operator=(const LockHolder&) = delete;
-
- ~LockHolder() {
- ReleaseSRWLockExclusive(mu_);
- }
-
- private:
- SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
- auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
- auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
- InitializeSRWLock(mu);
- InitializeConditionVariable(cv);
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
- SRWLOCK *mu = WinHelper::GetLock(this);
- CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
- LockHolder h(mu);
- ++waiter_count_;
-
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
- if (err == ERROR_TIMEOUT) {
- --waiter_count_;
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- LockHolder h(WinHelper::GetLock(this));
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- LockHolder h(WinHelper::GetLock(this));
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- WakeConditionVariable(WinHelper::GetCond(this));
- }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-} // namespace synchronization_internal
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Waiter::Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+// SRW locks and condition variables do not need to be explicitly destroyed.
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
+#else
+#error Unknown ABSL_WAITER_MODE
+#endif
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
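
The futex path above implements a counting semaphore as "atomically decrement if positive, otherwise block while the value is zero". A portable stand-in for that pattern, using C++20 std::atomic wait/notify instead of futexes and omitting timeouts and idle tracking, might look like this (illustrative only, not the Abseil implementation):

#include <atomic>
#include <cstdint>

class CountingEvent {
 public:
  void Post() {
    if (count_.fetch_add(1, std::memory_order_release) == 0) {
      count_.notify_one();  // Went 0 -> 1: wake one potential waiter.
    }
  }

  void Wait() {
    while (true) {
      int32_t x = count_.load(std::memory_order_relaxed);
      while (x != 0) {
        if (count_.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
          return;  // Consumed a wakeup.
        }
        // A failed CAS reloaded x; retry, or fall out if it reached zero.
      }
      count_.wait(0, std::memory_order_relaxed);  // Block while we see zero.
    }
  }

 private:
  std::atomic<int32_t> count_{0};
};
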
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
index 5d0ad76461..be3df180d4 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
@@ -1,155 +1,155 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#else
+#include <pthread.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/futex.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
-#define ABSL_WAITER_MODE_FUTEX 0
-#define ABSL_WAITER_MODE_SEM 1
-#define ABSL_WAITER_MODE_CONDVAR 2
-#define ABSL_WAITER_MODE_WIN32 3
-
-#if defined(ABSL_FORCE_WAITER_MODE)
-#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
+#define ABSL_WAITER_MODE_FUTEX 0
+#define ABSL_WAITER_MODE_SEM 1
+#define ABSL_WAITER_MODE_CONDVAR 2
+#define ABSL_WAITER_MODE_WIN32 3
+
+#if defined(ABSL_FORCE_WAITER_MODE)
+#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
+#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
-#endif
-
-namespace absl {
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
+#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
+#else
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#endif
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
- // Prepare any data to track waits.
- Waiter();
-
- // Not copyable or movable
- Waiter(const Waiter&) = delete;
- Waiter& operator=(const Waiter&) = delete;
-
- // Destroy any data to track waits.
- ~Waiter();
-
- // Blocks the calling thread until a matching call to `Post()` or
- // `t` has passed. Returns `true` if woken (`Post()` called),
- // `false` on timeout.
- bool Wait(KernelTimeout t);
-
- // Restart the caller of `Wait()` as with a normal semaphore.
- void Post();
-
- // If anyone is waiting, wake them up temporarily and cause them to
- // call `MaybeBecomeIdle()`. They will then return to waiting for a
- // `Post()` or timeout.
- void Poke();
-
- // Returns the Waiter associated with the identity.
- static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
- static_assert(
- sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
- "Insufficient space for Waiter");
- return reinterpret_cast<Waiter*>(identity->waiter_state.data);
- }
-
- // How many periods to remain idle before releasing resources
+namespace synchronization_internal {
+
+// Waiter is an OS-specific semaphore.
+class Waiter {
+ public:
+ // Prepare any data to track waits.
+ Waiter();
+
+ // Not copyable or movable
+ Waiter(const Waiter&) = delete;
+ Waiter& operator=(const Waiter&) = delete;
+
+ // Destroy any data to track waits.
+ ~Waiter();
+
+ // Blocks the calling thread until a matching call to `Post()` or
+ // `t` has passed. Returns `true` if woken (`Post()` called),
+ // `false` on timeout.
+ bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ void Poke();
+
+ // Returns the Waiter associated with the identity.
+ static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<Waiter*>(identity->waiter_state.data);
+ }
+
+ // How many periods to remain idle before releasing resources
#ifndef ABSL_HAVE_THREAD_SANITIZER
static constexpr int kIdlePeriods = 60;
-#else
- // Memory consumption under ThreadSanitizer is a serious concern,
- // so we release resources sooner. The value of 1 leads to 1 to 2 second
- // delay before marking a thread as idle.
- static const int kIdlePeriods = 1;
-#endif
-
- private:
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
- // Futexes are defined by specification to be 32-bits.
- // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
- std::atomic<int32_t> futex_;
- static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
- // REQUIRES: mu_ must be held.
- void InternalCondVarPoke();
-
- pthread_mutex_t mu_;
- pthread_cond_t cv_;
- int waiter_count_;
- int wakeup_count_; // Unclaimed wakeups.
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
- sem_t sem_;
- // This seems superfluous, but for Poke() we need to cause spurious
- // wakeups on the semaphore. Hence we can't actually use the
- // semaphore's count.
- std::atomic<int> wakeups_;
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
- // WinHelper - Used to define utilities for accessing the lock and
- // condition variable storage once the types are complete.
- class WinHelper;
-
- // REQUIRES: WinHelper::GetLock(this) must be held.
- void InternalCondVarPoke();
-
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static const int kIdlePeriods = 1;
+#endif
+
+ private:
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+ // Futexes are defined by specification to be 32-bits.
+ // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+ sem_t sem_;
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
// We can't include Windows.h in our headers, so we use aligned character
// buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
alignas(void*) unsigned char mu_storage_[sizeof(void*)];
alignas(void*) unsigned char cv_storage_[sizeof(void*)];
- int waiter_count_;
- int wakeup_count_;
-
-#else
- #error Unknown ABSL_WAITER_MODE
-#endif
-};
-
-} // namespace synchronization_internal
+ int waiter_count_;
+ int wakeup_count_;
+
+#else
+ #error Unknown ABSL_WAITER_MODE
+#endif
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
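
GetWaiter() above, and the Win32 mu_storage_/cv_storage_ fields, rely on the same idiom: keep an object in opaque, size- and alignment-checked byte storage, construct it with placement new, and recover it with reinterpret_cast. A generic sketch of that idiom (names are hypothetical; real code would also arrange for destruction when the type is not trivially destructible):

#include <cstddef>
#include <new>

struct OpaqueSlot {
  alignas(std::max_align_t) unsigned char data[64];
};

template <typename T>
T* ConstructIn(OpaqueSlot* slot) {
  static_assert(sizeof(T) <= sizeof(slot->data), "Insufficient space for T");
  static_assert(alignof(T) <= alignof(OpaqueSlot),
                "Insufficient alignment for T");
  return ::new (static_cast<void*>(slot->data)) T();
}

template <typename T>
T* GetFrom(OpaqueSlot* slot) {
  return reinterpret_cast<T*>(slot->data);  // Object must already be constructed.
}
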
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
index b4cbb122ab..40f72cf665 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
@@ -1,35 +1,35 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCS(
- graphcycles.cc
-)
-
-END()
+SRCS(
+ graphcycles.cc
+)
+
+END()
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
index 22a40ef21e..76ad41fe16 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.cc
@@ -1,98 +1,98 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/mutex.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#ifdef ERROR
-#undef ERROR
-#endif
-#else
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <sys/time.h>
-#endif
-
-#include <assert.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-
-#include <algorithm>
-#include <atomic>
-#include <cinttypes>
-#include <thread> // NOLINT(build/c++11)
-
-#include "absl/base/attributes.h"
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/mutex.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#ifdef ERROR
+#undef ERROR
+#endif
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/time.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cinttypes>
+#include <thread> // NOLINT(build/c++11)
+
+#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
-#include "absl/base/config.h"
-#include "absl/base/dynamic_annotations.h"
-#include "absl/base/internal/atomic_hook.h"
-#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/hide_ptr.h"
-#include "absl/base/internal/low_level_alloc.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/sysinfo.h"
-#include "absl/base/internal/thread_identity.h"
+#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/hide_ptr.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/port.h"
-#include "absl/debugging/stacktrace.h"
-#include "absl/debugging/symbolize.h"
-#include "absl/synchronization/internal/graphcycles.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-#include "absl/time/time.h"
-
-using absl::base_internal::CurrentThreadIdentityIfPresent;
-using absl::base_internal::PerThreadSynch;
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/symbolize.h"
+#include "absl/synchronization/internal/graphcycles.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/time/time.h"
+
+using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::PerThreadSynch;
using absl::base_internal::SchedulingGuard;
-using absl::base_internal::ThreadIdentity;
-using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
-using absl::synchronization_internal::GraphCycles;
-using absl::synchronization_internal::GraphId;
-using absl::synchronization_internal::InvalidGraphId;
-using absl::synchronization_internal::KernelTimeout;
-using absl::synchronization_internal::PerThreadSem;
-
-extern "C" {
+using absl::base_internal::ThreadIdentity;
+using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
+using absl::synchronization_internal::GraphCycles;
+using absl::synchronization_internal::GraphId;
+using absl::synchronization_internal::InvalidGraphId;
+using absl::synchronization_internal::KernelTimeout;
+using absl::synchronization_internal::PerThreadSem;
+
+extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
std::this_thread::yield();
}
-} // extern "C"
-
-namespace absl {
+} // extern "C"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-namespace {
-
+
+namespace {
+
#if defined(ABSL_HAVE_THREAD_SANITIZER)
-constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
-#else
-constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
-#endif
-
-ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
- kDeadlockDetectionDefault);
-ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
-
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
+#else
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
+#endif
+
+ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
+ kDeadlockDetectionDefault);
+ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
- submit_profile_data;
+ submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)>
mutex_tracer;
@@ -100,32 +100,32 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
cond_var_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(absl::Symbolize);
-
-} // namespace
-
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
- bool locking, bool trylock,
- bool read_lock);
-
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
- submit_profile_data.Store(fn);
-}
-
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
- int64_t wait_cycles)) {
- mutex_tracer.Store(fn);
-}
-
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
- cond_var_tracer.Store(fn);
-}
-
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
-}
-
+ bool (*)(const void *pc, char *out, int out_size)>
+ symbolizer(absl::Symbolize);
+
+} // namespace
+
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ bool locking, bool trylock,
+ bool read_lock);
+
+void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
+ submit_profile_data.Store(fn);
+}
+
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ int64_t wait_cycles)) {
+ mutex_tracer.Store(fn);
+}
+
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+ cond_var_tracer.Store(fn);
+}
+
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
+ symbolizer.Store(fn);
+}
+
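
The Register* entry points above simply store the supplied function pointers in atomic hooks. A hedged usage sketch; the callback bodies are illustrative, and since they run from inside synchronization code they should stay simple and avoid acquiring mutexes themselves:

#include <cstdint>
#include <cstdio>

#include "absl/synchronization/mutex.h"

void MyMutexTracer(const char* msg, const void* obj, int64_t wait_cycles) {
  std::fprintf(stderr, "%s%p waited %lld cycles\n", msg,
               const_cast<void*>(obj), static_cast<long long>(wait_cycles));
}

void InstallSynchHooks() {
  absl::RegisterMutexTracer(MyMutexTracer);
  absl::RegisterMutexProfiler(
      [](int64_t wait_cycles) { /* export wait_cycles to a metrics system */ });
}
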
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
@@ -164,605 +164,605 @@ namespace synchronization_internal {
// The returned value should be used as `c` for the next call to `MutexDelay`.
int MutexDelay(int32_t c, int mode) {
const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
- if (c < limit) {
+ if (c < limit) {
// Spin.
c++;
- } else {
+ } else {
SchedulingGuard::ScopedEnable enable_rescheduling;
- ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
if (c == limit) {
// Yield once.
ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
- c++;
+ c++;
} else {
// Then wait.
- absl::SleepFor(absl::Microseconds(10));
- c = 0;
- }
- ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
- }
+ absl::SleepFor(absl::Microseconds(10));
+ c = 0;
+ }
+ ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
+ }
return c;
-}
+}
} // namespace synchronization_internal
-
-// --------------------------Generic atomic ops
-// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
-// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
-// before making any change.
-// This is used to set flags in mutex and condition variable words.
-static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
- intptr_t wait_until_clear) {
- intptr_t v;
- do {
- v = pv->load(std::memory_order_relaxed);
- } while ((v & bits) != bits &&
- ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits,
- std::memory_order_release,
- std::memory_order_relaxed)));
-}
-
-// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
-// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
-// before making any change.
-// This is used to unset flags in mutex and condition variable words.
-static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
- intptr_t wait_until_clear) {
- intptr_t v;
- do {
- v = pv->load(std::memory_order_relaxed);
- } while ((v & bits) != 0 &&
- ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits,
- std::memory_order_release,
- std::memory_order_relaxed)));
-}
-
-//------------------------------------------------------------------
-
-// Data for doing deadlock detection.
+
+// --------------------------Generic atomic ops
+// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
+// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to set flags in mutex and condition variable words.
+static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != bits &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v | bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
+// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to unset flags in mutex and condition variable words.
+static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != 0 &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v & ~bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+//------------------------------------------------------------------
+
+// Data for doing deadlock detection.
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
+
// Graph used to detect deadlocks.
ABSL_CONST_INIT static GraphCycles *deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
-
-//------------------------------------------------------------------
-// An event mechanism for debugging mutex use.
-// It also allows mutexes to be given names for those who can't handle
-// addresses, and instead like to give their data structures names like
-// "Henry", "Fido", or "Rupert IV, King of Yondavia".
-
-namespace { // to prevent name pollution
-enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
- // Mutex events
- SYNCH_EV_TRYLOCK_SUCCESS,
- SYNCH_EV_TRYLOCK_FAILED,
- SYNCH_EV_READERTRYLOCK_SUCCESS,
- SYNCH_EV_READERTRYLOCK_FAILED,
- SYNCH_EV_LOCK,
- SYNCH_EV_LOCK_RETURNING,
- SYNCH_EV_READERLOCK,
- SYNCH_EV_READERLOCK_RETURNING,
- SYNCH_EV_UNLOCK,
- SYNCH_EV_READERUNLOCK,
-
- // CondVar events
- SYNCH_EV_WAIT,
- SYNCH_EV_WAIT_RETURNING,
- SYNCH_EV_SIGNAL,
- SYNCH_EV_SIGNALALL,
-};
-
-enum { // Event flags
- SYNCH_F_R = 0x01, // reader event
- SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
- SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
- SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
-
- SYNCH_F_LCK_W = SYNCH_F_LCK,
- SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
-};
-} // anonymous namespace
-
-// Properties of the events.
-static const struct {
- int flags;
- const char *msg;
-} event_properties[] = {
- {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
- {0, "TryLock failed "},
- {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
- {0, "ReaderTryLock failed "},
- {0, "Lock blocking "},
- {SYNCH_F_LCK_W, "Lock returning "},
- {0, "ReaderLock blocking "},
- {SYNCH_F_LCK_R, "ReaderLock returning "},
- {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
- {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
- {0, "Wait on "},
- {0, "Wait unblocked "},
- {0, "Signal on "},
- {0, "SignalAll on "},
-};
-
+
+//------------------------------------------------------------------
+// An event mechanism for debugging mutex use.
+// It also allows mutexes to be given names for those who can't handle
+// addresses, and instead like to give their data structures names like
+// "Henry", "Fido", or "Rupert IV, King of Yondavia".
+
+namespace { // to prevent name pollution
+enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
+ // Mutex events
+ SYNCH_EV_TRYLOCK_SUCCESS,
+ SYNCH_EV_TRYLOCK_FAILED,
+ SYNCH_EV_READERTRYLOCK_SUCCESS,
+ SYNCH_EV_READERTRYLOCK_FAILED,
+ SYNCH_EV_LOCK,
+ SYNCH_EV_LOCK_RETURNING,
+ SYNCH_EV_READERLOCK,
+ SYNCH_EV_READERLOCK_RETURNING,
+ SYNCH_EV_UNLOCK,
+ SYNCH_EV_READERUNLOCK,
+
+ // CondVar events
+ SYNCH_EV_WAIT,
+ SYNCH_EV_WAIT_RETURNING,
+ SYNCH_EV_SIGNAL,
+ SYNCH_EV_SIGNALALL,
+};
+
+enum { // Event flags
+ SYNCH_F_R = 0x01, // reader event
+ SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
+ SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
+ SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
+
+ SYNCH_F_LCK_W = SYNCH_F_LCK,
+ SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
+};
+} // anonymous namespace
+
+// Properties of the events.
+static const struct {
+ int flags;
+ const char *msg;
+} event_properties[] = {
+ {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
+ {0, "TryLock failed "},
+ {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
+ {0, "ReaderTryLock failed "},
+ {0, "Lock blocking "},
+ {SYNCH_F_LCK_W, "Lock returning "},
+ {0, "ReaderLock blocking "},
+ {SYNCH_F_LCK_R, "ReaderLock returning "},
+ {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
+ {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
+ {0, "Wait on "},
+ {0, "Wait unblocked "},
+ {0, "Signal on "},
+ {0, "SignalAll on "},
+};
+
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// Hash table size; should be prime > 2.
-// Can't be too small, as it's used for deadlock detection information.
+
+// Hash table size; should be prime > 2.
+// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
-
-static struct SynchEvent { // this is a trivial hash table for the events
- // struct is freed when refcount reaches 0
- int refcount ABSL_GUARDED_BY(synch_event_mu);
-
- // buckets have linear, 0-terminated chains
- SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
-
- // Constant after initialization
- uintptr_t masked_addr; // object at this address is called "name"
-
- // No explicit synchronization used. Instead we assume that the
- // client who enables/disables invariants/logging on a Mutex does so
- // while the Mutex is not being concurrently accessed by others.
- void (*invariant)(void *arg); // called on each event
- void *arg; // first arg to (*invariant)()
- bool log; // logging turned on
-
- // Constant after initialization
+
+static struct SynchEvent { // this is a trivial hash table for the events
+ // struct is freed when refcount reaches 0
+ int refcount ABSL_GUARDED_BY(synch_event_mu);
+
+ // buckets have linear, 0-terminated chains
+ SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+
+ // Constant after initialization
+ uintptr_t masked_addr; // object at this address is called "name"
+
+ // No explicit synchronization used. Instead we assume that the
+ // client who enables/disables invariants/logging on a Mutex does so
+ // while the Mutex is not being concurrently accessed by others.
+ void (*invariant)(void *arg); // called on each event
+ void *arg; // first arg to (*invariant)()
+ bool log; // logging turned on
+
+ // Constant after initialization
char name[1]; // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
-
-// Ensure that the object at "addr" has a SynchEvent struct associated with it,
-// set "bits" in the word there (waiting until lockbit is clear before doing
-// so), and return a refcounted reference that will remain valid until
-// UnrefSynchEvent() is called. If a new SynchEvent is allocated,
-// the string name is copied into it.
-// When used with a mutex, the caller should also ensure that kMuEvent
-// is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
- const char *name, intptr_t bits,
- intptr_t lockbit) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
- // first look for an existing SynchEvent struct.
- synch_event_mu.Lock();
- for (e = synch_event[h];
- e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- e = e->next) {
- }
- if (e == nullptr) { // no SynchEvent struct found; make one.
- if (name == nullptr) {
- name = "";
- }
- size_t l = strlen(name);
- e = reinterpret_cast<SynchEvent *>(
- base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
- e->refcount = 2; // one for return value, one for linked list
- e->masked_addr = base_internal::HidePtr(addr);
- e->invariant = nullptr;
- e->arg = nullptr;
- e->log = false;
- strcpy(e->name, name); // NOLINT(runtime/printf)
- e->next = synch_event[h];
- AtomicSetBits(addr, bits, lockbit);
- synch_event[h] = e;
- } else {
- e->refcount++; // for return value
- }
- synch_event_mu.Unlock();
- return e;
-}
-
-// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
- base_internal::LowLevelAlloc::Free(e);
-}
-
-// Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
- if (e != nullptr) {
- synch_event_mu.Lock();
- bool del = (--(e->refcount) == 0);
- synch_event_mu.Unlock();
- if (del) {
- DeleteSynchEvent(e);
- }
- }
-}
-
-// Forget the mapping from the object (Mutex or CondVar) at address addr
-// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
-// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
- intptr_t lockbit) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
- SynchEvent **pe;
- SynchEvent *e;
- synch_event_mu.Lock();
- for (pe = &synch_event[h];
- (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- pe = &e->next) {
- }
- bool del = false;
- if (e != nullptr) {
- *pe = e->next;
- del = (--(e->refcount) == 0);
- }
- AtomicClearBits(addr, bits, lockbit);
- synch_event_mu.Unlock();
- if (del) {
- DeleteSynchEvent(e);
- }
-}
-
-// Return a refcounted reference to the SynchEvent of the object at address
-// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
-// called.
-static SynchEvent *GetSynchEvent(const void *addr) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
- synch_event_mu.Lock();
- for (e = synch_event[h];
- e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- e = e->next) {
- }
- if (e != nullptr) {
- e->refcount++;
- }
- synch_event_mu.Unlock();
- return e;
-}
-
-// Called when an event "ev" occurs on a Mutex of CondVar "obj"
-// if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
- SynchEvent *e = GetSynchEvent(obj);
- // logging is on if event recording is on and either there's no event struct,
- // or it explicitly says to log
- if (e == nullptr || e->log) {
- void *pcs[40];
- int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
- // A buffer with enough space for the ASCII for all the PCs, even on a
- // 64-bit machine.
- char buffer[ABSL_ARRAYSIZE(pcs) * 24];
- int pos = snprintf(buffer, sizeof (buffer), " @");
- for (int i = 0; i != n; i++) {
- pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
- }
- ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
- (e == nullptr ? "" : e->name), buffer);
- }
- const int flags = event_properties[ev].flags;
- if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
- // Calling the invariant as is causes problems under ThreadSanitizer.
- // We are currently inside of Mutex Lock/Unlock and are ignoring all
- // memory accesses and synchronization. If the invariant transitively
- // synchronizes something else and we ignore the synchronization, we will
- // get false positive race reports later.
- // Reuse EvalConditionAnnotated to properly call into user code.
- struct local {
- static bool pred(SynchEvent *ev) {
- (*ev->invariant)(ev->arg);
- return false;
- }
- };
- Condition cond(&local::pred, e);
- Mutex *mu = static_cast<Mutex *>(obj);
- const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
- const bool trylock = (flags & SYNCH_F_TRY) != 0;
- const bool read_lock = (flags & SYNCH_F_R) != 0;
- EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
- }
- UnrefSynchEvent(e);
-}
-
-//------------------------------------------------------------------
-
-// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
-// whether it has a timeout, the condition, exclusive/shared, and whether a
-// condition variable wait has an associated Mutex (as opposed to another
-// type of lock). It also points to the PerThreadSynch struct of its thread.
-// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
-//
-// This structure is held on the stack rather than directly in
-// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
-// while waiting on one Mutex, the implementation calls a client callback
-// (such as a Condition function) that acquires another Mutex. We don't
-// strictly need to allow this, but programmers become confused if we do not
-// allow them to use functions such as LOG() within Condition functions. The
-// PerThreadSynch struct points at the most recent SynchWaitParams struct when
-// the thread is on a Mutex's waiter queue.
-struct SynchWaitParams {
- SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
- KernelTimeout timeout_arg, Mutex *cvmu_arg,
- PerThreadSynch *thread_arg,
- std::atomic<intptr_t> *cv_word_arg)
- : how(how_arg),
- cond(cond_arg),
- timeout(timeout_arg),
- cvmu(cvmu_arg),
- thread(thread_arg),
- cv_word(cv_word_arg),
- contention_start_cycles(base_internal::CycleClock::Now()) {}
-
- const Mutex::MuHow how; // How this thread needs to wait.
- const Condition *cond; // The condition that this thread is waiting for.
- // In Mutex, this field is set to zero if a timeout
- // expires.
- KernelTimeout timeout; // timeout expiry---absolute time
- // In Mutex, this field is set to zero if a timeout
- // expires.
- Mutex *const cvmu; // used for transfer from cond var to mutex
- PerThreadSynch *const thread; // thread that is waiting
-
- // If not null, thread should be enqueued on the CondVar whose state
- // word is cv_word instead of queueing normally on the Mutex.
- std::atomic<intptr_t> *cv_word;
-
- int64_t contention_start_cycles; // Time (in cycles) when this thread started
+} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+
+// Ensure that the object at "addr" has a SynchEvent struct associated with it,
+// set "bits" in the word there (waiting until lockbit is clear before doing
+// so), and return a refcounted reference that will remain valid until
+// UnrefSynchEvent() is called. If a new SynchEvent is allocated,
+// the string name is copied into it.
+// When used with a mutex, the caller should also ensure that kMuEvent
+// is set in the mutex word, and similarly for condition variables and kCVEvent.
+static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
+ const char *name, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+ // first look for an existing SynchEvent struct.
+ synch_event_mu.Lock();
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
+ }
+ if (e == nullptr) { // no SynchEvent struct found; make one.
+ if (name == nullptr) {
+ name = "";
+ }
+ size_t l = strlen(name);
+ e = reinterpret_cast<SynchEvent *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
+ e->refcount = 2; // one for return value, one for linked list
+ e->masked_addr = base_internal::HidePtr(addr);
+ e->invariant = nullptr;
+ e->arg = nullptr;
+ e->log = false;
+ strcpy(e->name, name); // NOLINT(runtime/printf)
+ e->next = synch_event[h];
+ AtomicSetBits(addr, bits, lockbit);
+ synch_event[h] = e;
+ } else {
+ e->refcount++; // for return value
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
+static void DeleteSynchEvent(SynchEvent *e) {
+ base_internal::LowLevelAlloc::Free(e);
+}
+
+// Decrement the reference count of *e, or do nothing if e==null.
+static void UnrefSynchEvent(SynchEvent *e) {
+ if (e != nullptr) {
+ synch_event_mu.Lock();
+ bool del = (--(e->refcount) == 0);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+ }
+}
+
+// Forget the mapping from the object (Mutex or CondVar) at address addr
+// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
+// is clear before doing so).
+static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent **pe;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (pe = &synch_event[h];
+ (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ pe = &e->next) {
+ }
+ bool del = false;
+ if (e != nullptr) {
+ *pe = e->next;
+ del = (--(e->refcount) == 0);
+ }
+ AtomicClearBits(addr, bits, lockbit);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+}
+
+// Return a refcounted reference to the SynchEvent of the object at address
+// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
+// called.
+static SynchEvent *GetSynchEvent(const void *addr) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
+ }
+ if (e != nullptr) {
+ e->refcount++;
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Called when an event "ev" occurs on a Mutex of CondVar "obj"
+// if event recording is on
+static void PostSynchEvent(void *obj, int ev) {
+ SynchEvent *e = GetSynchEvent(obj);
+ // logging is on if event recording is on and either there's no event struct,
+ // or it explicitly says to log
+ if (e == nullptr || e->log) {
+ void *pcs[40];
+ int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
+ // A buffer with enough space for the ASCII for all the PCs, even on a
+ // 64-bit machine.
+ char buffer[ABSL_ARRAYSIZE(pcs) * 24];
+ int pos = snprintf(buffer, sizeof (buffer), " @");
+ for (int i = 0; i != n; i++) {
+ pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
+ }
+ ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
+ (e == nullptr ? "" : e->name), buffer);
+ }
+ const int flags = event_properties[ev].flags;
+ if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
+ // Calling the invariant as is causes problems under ThreadSanitizer.
+ // We are currently inside of Mutex Lock/Unlock and are ignoring all
+ // memory accesses and synchronization. If the invariant transitively
+ // synchronizes something else and we ignore the synchronization, we will
+ // get false positive race reports later.
+ // Reuse EvalConditionAnnotated to properly call into user code.
+ struct local {
+ static bool pred(SynchEvent *ev) {
+ (*ev->invariant)(ev->arg);
+ return false;
+ }
+ };
+ Condition cond(&local::pred, e);
+ Mutex *mu = static_cast<Mutex *>(obj);
+ const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
+ const bool trylock = (flags & SYNCH_F_TRY) != 0;
+ const bool read_lock = (flags & SYNCH_F_R) != 0;
+ EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
+ }
+ UnrefSynchEvent(e);
+}
+
+//------------------------------------------------------------------
+
+// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
+// whether it has a timeout, the condition, exclusive/shared, and whether a
+// condition variable wait has an associated Mutex (as opposed to another
+// type of lock). It also points to the PerThreadSynch struct of its thread.
+// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
+//
+// This structure is held on the stack rather than directly in
+// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
+// while waiting on one Mutex, the implementation calls a client callback
+// (such as a Condition function) that acquires another Mutex. We don't
+// strictly need to allow this, but programmers become confused if we do not
+// allow them to use functions such as LOG() within Condition functions. The
+// PerThreadSynch struct points at the most recent SynchWaitParams struct when
+// the thread is on a Mutex's waiter queue.
+struct SynchWaitParams {
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
+ KernelTimeout timeout_arg, Mutex *cvmu_arg,
+ PerThreadSynch *thread_arg,
+ std::atomic<intptr_t> *cv_word_arg)
+ : how(how_arg),
+ cond(cond_arg),
+ timeout(timeout_arg),
+ cvmu(cvmu_arg),
+ thread(thread_arg),
+ cv_word(cv_word_arg),
+ contention_start_cycles(base_internal::CycleClock::Now()) {}
+
+ const Mutex::MuHow how; // How this thread needs to wait.
+ const Condition *cond; // The condition that this thread is waiting for.
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
+ KernelTimeout timeout; // timeout expiry---absolute time
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
+ Mutex *const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch *const thread; // thread that is waiting
+
+ // If not null, thread should be enqueued on the CondVar whose state
+ // word is cv_word instead of queueing normally on the Mutex.
+ std::atomic<intptr_t> *cv_word;
+
+ int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex.
-};
-
-struct SynchLocksHeld {
- int n; // number of valid entries in locks[]
- bool overflow; // true iff we overflowed the array at some point
- struct {
- Mutex *mu; // lock acquired
- int32_t count; // times acquired
- GraphId id; // deadlock_graph id of acquired lock
- } locks[40];
- // If a thread overfills the array during deadlock detection, we
- // continue, discarding information as needed. If no overflow has
- // taken place, we can provide more error checking, such as
- // detecting when a thread releases a lock it does not hold.
-};
-
-// A sentinel value in lists that is not 0.
-// A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
- reinterpret_cast<PerThreadSynch *>(1);
-
-static SynchLocksHeld *LocksHeldAlloc() {
- SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
- base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
- ret->n = 0;
- ret->overflow = false;
- return ret;
-}
-
-// Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
- ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
- return &identity->per_thread_synch;
-}
-
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
- if (mu) {
- ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- }
- PerThreadSynch *w = Synch_GetPerThread();
- if (mu) {
- ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
- }
- return w;
-}
-
-static SynchLocksHeld *Synch_GetAllLocks() {
- PerThreadSynch *s = Synch_GetPerThread();
- if (s->all_locks == nullptr) {
- s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
- }
- return s->all_locks;
-}
-
-// Post on "w"'s associated PerThreadSem.
+};
+
+struct SynchLocksHeld {
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
+ struct {
+ Mutex *mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
+ } locks[40];
+ // If a thread overfills the array during deadlock detection, we
+ // continue, discarding information as needed. If no overflow has
+ // taken place, we can provide more error checking, such as
+ // detecting when a thread releases a lock it does not hold.
+};
+
+// A sentinel value in lists that is not 0.
+// A 0 value is used to mean "not on a list".
+static PerThreadSynch *const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch *>(1);
+
+static SynchLocksHeld *LocksHeldAlloc() {
+ SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
+ ret->n = 0;
+ ret->overflow = false;
+ return ret;
+}
+
+// Return the PerThreadSynch-struct for this thread.
+static PerThreadSynch *Synch_GetPerThread() {
+ ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+ return &identity->per_thread_synch;
+}
+
+static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSynch *w = Synch_GetPerThread();
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return w;
+}
+
+static SynchLocksHeld *Synch_GetAllLocks() {
+ PerThreadSynch *s = Synch_GetPerThread();
+ if (s->all_locks == nullptr) {
+ s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
+ }
+ return s->all_locks;
+}
+
+// Post on "w"'s associated PerThreadSem.
void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
- if (mu) {
- ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- }
- PerThreadSem::Post(w->thread_identity());
- if (mu) {
- ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
- }
-}
-
-// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
- if (mu) {
- ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- }
- assert(w == Synch_GetPerThread());
- static_cast<void>(w);
- bool res = PerThreadSem::Wait(t);
- if (mu) {
- ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
- }
- return res;
-}
-
-// We're in a fatal signal handler that hopes to use Mutex and to get
-// lucky by not deadlocking. We try to improve its chances of success
-// by effectively disabling some of the consistency checks. This will
-// prevent certain ABSL_RAW_CHECK() statements from being triggered when
-// re-rentry is detected. The ABSL_RAW_CHECK() statements are those in the
-// Mutex code checking that the "waitp" field has not been reused.
-void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
- // Fix the per-thread state only if it exists.
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
- if (identity != nullptr) {
- identity->per_thread_synch.suppress_fatal_errors = true;
- }
- // Don't do deadlock detection when we are already failing.
- synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
- std::memory_order_release);
-}
-
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
-// --------------------------Mutexes
-
-// In the layout below, the msb of the bottom byte is currently unused. Also,
-// the following constraints were considered in choosing the layout:
-// o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
-// 0xcd) are illegal: reader and writer lock both held.
-// o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
-// bit-twiddling trick in Mutex::Unlock().
-// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
-// to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
-static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
-static const intptr_t kMuWait = 0x0004L; // threads are waiting
-static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
-static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
-// INVARIANT1: there's a thread that was blocked on the mutex, is
-// no longer, yet has not yet acquired the mutex. If there's a
-// designated waker, all threads can avoid taking the slow path in
-// unlock because the designated waker will subsequently acquire
-// the lock and wake someone. To maintain INVARIANT1 the bit is
-// set when a thread is unblocked(INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
-static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
-static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
-static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
-
-// Hack to make constant values available to gdb pretty printer
-enum {
- kGdbMuSpin = kMuSpin,
- kGdbMuEvent = kMuEvent,
- kGdbMuWait = kMuWait,
- kGdbMuWriter = kMuWriter,
- kGdbMuDesig = kMuDesig,
- kGdbMuWrWait = kMuWrWait,
- kGdbMuReader = kMuReader,
- kGdbMuLow = kMuLow,
-};
-
-// kMuWrWait implies kMuWait.
-// kMuReader and kMuWriter are mutually exclusive.
-// If kMuReader is zero, there are no readers.
-// Otherwise, if kMuWait is zero, the high order bits contain a count of the
-// number of readers. Otherwise, the reader count is held in
-// PerThreadSynch::readers of the most recently queued waiter, again in the
-// bits above kMuLow.
-static const intptr_t kMuOne = 0x0100; // a count of one reader
-
-// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
-static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
-static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
-
-static_assert(PerThreadSynch::kAlignment > kMuLow,
- "PerThreadSynch::kAlignment must be greater than kMuLow");
-
-// This struct contains various bitmasks to be used in
-// acquiring and releasing a mutex in a particular mode.
-struct MuHowS {
- // if all the bits in fast_need_zero are zero, the lock can be acquired by
- // adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
- // this is the designated waker.
- intptr_t fast_need_zero;
- intptr_t fast_or;
- intptr_t fast_add;
-
- intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
-
- intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
- // zero a reader can acquire a read share by
- // setting the reader bit and incrementing
- // the reader count (in last waiter since
- // we're now slow-path). kMuWrWait may
- // be ignored if we already waited once.
-};
-
-static const MuHowS kSharedS = {
- // shared or read lock
- kMuWriter | kMuWait | kMuEvent, // fast_need_zero
- kMuReader, // fast_or
- kMuOne, // fast_add
- kMuWriter | kMuWait, // slow_need_zero
- kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
-};
-static const MuHowS kExclusiveS = {
- // exclusive or write lock
- kMuWriter | kMuReader | kMuEvent, // fast_need_zero
- kMuWriter, // fast_or
- 0, // fast_add
- kMuWriter | kMuReader, // slow_need_zero
- ~static_cast<intptr_t>(0), // slow_inc_need_zero
-};
-static const Mutex::MuHow kShared = &kSharedS; // shared lock
-static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
-
-#ifdef NDEBUG
-static constexpr bool kDebugMode = false;
-#else
-static constexpr bool kDebugMode = true;
-#endif
-
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSem::Post(w->thread_identity());
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+}
+
+// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
+bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ assert(w == Synch_GetPerThread());
+ static_cast<void>(w);
+ bool res = PerThreadSem::Wait(t);
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return res;
+}
+
+// We're in a fatal signal handler that hopes to use Mutex and to get
+// lucky by not deadlocking. We try to improve its chances of success
+// by effectively disabling some of the consistency checks. This will
+// prevent certain ABSL_RAW_CHECK() statements from being triggered when
+// re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
+// Mutex code checking that the "waitp" field has not been reused.
+void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
+ // Fix the per-thread state only if it exists.
+ ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ if (identity != nullptr) {
+ identity->per_thread_synch.suppress_fatal_errors = true;
+ }
+ // Don't do deadlock detection when we are already failing.
+ synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
+ std::memory_order_release);
+}
+
+// --------------------------time support
+
+// Return the current time plus the timeout. Use the same clock as
+// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
+// such a choice when a deadline is given directly.
+static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
+#ifndef _WIN32
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return absl::TimeFromTimeval(tv) + timeout;
+#else
+ return absl::Now() + timeout;
+#endif
+}
+
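[Editor's illustrative sketch, not part of the patch.] DeadlineFromTimeout() above converts a relative timeout into an absolute deadline on the same clock that PerThreadSem::Wait() uses. The public API exposes the same choice: the *WithTimeout Mutex members take an absl::Duration, the *WithDeadline members take an absl::Time. A minimal, hedged usage sketch — WaitForFlag is a hypothetical helper, while the Mutex members and Condition constructor are the real public API:

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

// Returns true if *flag became true within the timeout. The timed Lock*
// variants return with the mutex held whether or not the condition became
// true, so we always Unlock() afterwards.
bool WaitForFlag(absl::Mutex* mu, const bool* flag) {
  absl::Condition flag_is_set(flag);
  const bool ok = mu->LockWhenWithTimeout(flag_is_set, absl::Milliseconds(500));
  // Equivalent deadline form:
  //   mu->LockWhenWithDeadline(flag_is_set, absl::Now() + absl::Milliseconds(500));
  mu->Unlock();
  return ok;
}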
+// --------------------------Mutexes
+
+// In the layout below, the msb of the bottom byte is currently unused. Also,
+// the following constraints were considered in choosing the layout:
+// o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
+// 0xcd) are illegal: reader and writer lock both held.
+// o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
+// bit-twiddling trick in Mutex::Unlock().
+// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
+// to enable the bit-twiddling trick in CheckForMutexCorruption().
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// INVARIANT1: there's a thread that was blocked on the mutex, is
+// no longer, yet has not yet acquired the mutex. If there's a
+// designated waker, all threads can avoid taking the slow path in
+// unlock because the designated waker will subsequently acquire
+// the lock and wake someone. To maintain INVARIANT1 the bit is
+// set when a thread is unblocked(INV1a), and threads that were
+// unblocked reset the bit when they either acquire or re-block
+// (INV1b).
+static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
+ // for a reader
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+
+// Hack to make constant values available to gdb pretty printer
+enum {
+ kGdbMuSpin = kMuSpin,
+ kGdbMuEvent = kMuEvent,
+ kGdbMuWait = kMuWait,
+ kGdbMuWriter = kMuWriter,
+ kGdbMuDesig = kMuDesig,
+ kGdbMuWrWait = kMuWrWait,
+ kGdbMuReader = kMuReader,
+ kGdbMuLow = kMuLow,
+};
+
+// kMuWrWait implies kMuWait.
+// kMuReader and kMuWriter are mutually exclusive.
+// If kMuReader is zero, there are no readers.
+// Otherwise, if kMuWait is zero, the high order bits contain a count of the
+// number of readers. Otherwise, the reader count is held in
+// PerThreadSynch::readers of the most recently queued waiter, again in the
+// bits above kMuLow.
+static const intptr_t kMuOne = 0x0100; // a count of one reader
+
+// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
+static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
+static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
+
+static_assert(PerThreadSynch::kAlignment > kMuLow,
+ "PerThreadSynch::kAlignment must be greater than kMuLow");
+
+// This struct contains various bitmasks to be used in
+// acquiring and releasing a mutex in a particular mode.
+struct MuHowS {
+ // if all the bits in fast_need_zero are zero, the lock can be acquired by
+ // adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
+ // this is the designated waker.
+ intptr_t fast_need_zero;
+ intptr_t fast_or;
+ intptr_t fast_add;
+
+ intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
+
+ intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
+ // zero a reader can acquire a read share by
+ // setting the reader bit and incrementing
+ // the reader count (in last waiter since
+ // we're now slow-path). kMuWrWait may
+ // be ignored if we already waited once.
+};
+
+static const MuHowS kSharedS = {
+ // shared or read lock
+ kMuWriter | kMuWait | kMuEvent, // fast_need_zero
+ kMuReader, // fast_or
+ kMuOne, // fast_add
+ kMuWriter | kMuWait, // slow_need_zero
+ kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
+};
+static const MuHowS kExclusiveS = {
+ // exclusive or write lock
+ kMuWriter | kMuReader | kMuEvent, // fast_need_zero
+ kMuWriter, // fast_or
+ 0, // fast_add
+ kMuWriter | kMuReader, // slow_need_zero
+ ~static_cast<intptr_t>(0), // slow_inc_need_zero
+};
+static const Mutex::MuHow kShared = &kSharedS; // shared lock
+static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
+
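[Editor's illustrative sketch, not part of the patch.] The MuHowS comment above states the fast-path rule: when every bit in fast_need_zero is clear, the lock can be taken by OR-ing in fast_or and adding fast_add under an acquire CAS. The helper below is hypothetical (the real fast path is inlined in Mutex::Lock()/ReaderLock()); it only restates that rule against the kSharedS/kExclusiveS tables:

// Hypothetical restatement of the fast-path rule; not present in mutex.cc.
static bool TryFastAcquire(std::atomic<intptr_t>* word, Mutex::MuHow how) {
  intptr_t v = word->load(std::memory_order_relaxed);
  if ((v & how->fast_need_zero) != 0) {
    return false;  // writer/reader/event bits set: caller must take the slow path
  }
  // kExclusive sets kMuWriter; kShared sets kMuReader and adds kMuOne
  // (one reader) to the count kept in the bits above kMuLow.
  const intptr_t desired = (v | how->fast_or) + how->fast_add;
  return word->compare_exchange_strong(v, desired, std::memory_order_acquire,
                                       std::memory_order_relaxed);
}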
+#ifdef NDEBUG
+static constexpr bool kDebugMode = false;
+#else
+static constexpr bool kDebugMode = true;
+#endif
+
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-static unsigned TsanFlags(Mutex::MuHow how) {
- return how == kShared ? __tsan_mutex_read_lock : 0;
-}
-#endif
-
-static bool DebugOnlyIsExiting() {
- return false;
-}
-
-Mutex::~Mutex() {
- intptr_t v = mu_.load(std::memory_order_relaxed);
- if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
- ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
- }
- if (kDebugMode) {
- this->ForgetDeadlockInfo();
- }
- ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
-}
-
-void Mutex::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
- e->log = true;
- UnrefSynchEvent(e);
-}
-
-void EnableMutexInvariantDebugging(bool enabled) {
- synch_check_invariants.store(enabled, std::memory_order_release);
-}
-
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
- void *arg) {
- if (synch_check_invariants.load(std::memory_order_acquire) &&
- invariant != nullptr) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
- e->invariant = invariant;
- e->arg = arg;
- UnrefSynchEvent(e);
- }
-}
-
-void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
- synch_deadlock_detection.store(mode, std::memory_order_release);
-}
-
+static unsigned TsanFlags(Mutex::MuHow how) {
+ return how == kShared ? __tsan_mutex_read_lock : 0;
+}
+#endif
+
+static bool DebugOnlyIsExiting() {
+ return false;
+}
+
+Mutex::~Mutex() {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
+ ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
+ }
+ if (kDebugMode) {
+ this->ForgetDeadlockInfo();
+ }
+ ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
+}
+
+void Mutex::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+void EnableMutexInvariantDebugging(bool enabled) {
+ synch_check_invariants.store(enabled, std::memory_order_release);
+}
+
+void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
+ void *arg) {
+ if (synch_check_invariants.load(std::memory_order_acquire) &&
+ invariant != nullptr) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ e->invariant = invariant;
+ e->arg = arg;
+ UnrefSynchEvent(e);
+ }
+}
+
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
+ synch_deadlock_detection.store(mode, std::memory_order_release);
+}
+
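[Editor's illustrative sketch, not part of the patch.] The three hooks wired up above are public API: Mutex::EnableDebugLog() attaches a named SynchEvent and turns on event logging, Mutex::EnableInvariantDebugging() registers an invariant that PostSynchEvent() evaluates around lock/unlock (only if absl::EnableMutexInvariantDebugging(true) was called first, per the check above), and absl::SetMutexDeadlockDetectionMode() selects the reaction to a detected cycle. The Counter type and helper below are hypothetical:

#include <cassert>
#include "absl/synchronization/mutex.h"

struct Counter {
  absl::Mutex mu;
  int value = 0;  // expected to stay non-negative
};

// Invariant callback: invoked by the Mutex event machinery while the lock is held.
static void CounterInvariant(void* arg) {
  Counter* c = static_cast<Counter*>(arg);
  assert(c->value >= 0);
  (void)c;  // silence unused warning when asserts are compiled out
}

void InstallDebugHooks(Counter* c) {
  absl::EnableMutexInvariantDebugging(true);  // must precede registration below
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  c->mu.EnableDebugLog("counter_mu");                    // name + event logging
  c->mu.EnableInvariantDebugging(&CounterInvariant, c);  // checked on lock events
}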
// Return true iff threads x and y are part of the same equivalence
// class of waiters. An equivalence class is defined as the set of
// waiters with the same condition, type of lock, and thread priority.
@@ -770,1982 +770,1982 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
// Requires that x and y be waiting on the same Mutex queue.
static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
- Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
-}
-
-// Given the contents of a mutex word containing a PerThreadSynch pointer,
-// return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
- return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
-}
-
-// The next several routines maintain the per-thread next and skip fields
-// used in the Mutex waiter queue.
-// The queue is a circular singly-linked list, of which the "head" is the
-// last element, and head->next is the first element.
-// The skip field has the invariant:
-// For thread x, x->skip is one of:
-// - invalid (iff x is not in a Mutex wait queue),
-// - null, or
-// - a pointer to a distinct thread waiting later in the same Mutex queue
+ Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
+}
+
+// Given the contents of a mutex word containing a PerThreadSynch pointer,
+// return the pointer.
+static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+}
+
+// The next several routines maintain the per-thread next and skip fields
+// used in the Mutex waiter queue.
+// The queue is a circular singly-linked list, of which the "head" is the
+// last element, and head->next is the first element.
+// The skip field has the invariant:
+// For thread x, x->skip is one of:
+// - invalid (iff x is not in a Mutex wait queue),
+// - null, or
+// - a pointer to a distinct thread waiting later in the same Mutex queue
// such that all threads in [x, x->skip] have the same condition, priority
// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
// x->skip]).
-// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
-//
+// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
+//
// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
-// first runnable thread y from the front of a Mutex queue to adjust the skip
-// field of another thread x because if x->skip==y, x->skip must (have) become
-// invalid before y is removed. The function TryRemove can remove a specified
-// thread from an arbitrary position in the queue whether runnable or not, so
-// it fixes up skip fields that would otherwise be left dangling.
-// The statement
+// first runnable thread y from the front of a Mutex queue to adjust the skip
+// field of another thread x because if x->skip==y, x->skip must (have) become
+// invalid before y is removed. The function TryRemove can remove a specified
+// thread from an arbitrary position in the queue whether runnable or not, so
+// it fixes up skip fields that would otherwise be left dangling.
+// The statement
// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
-// maintains the invariant provided x is not the last waiter in a Mutex queue
-// The statement
-// if (x->skip != null) { x->skip = x->skip->skip; }
-// maintains the invariant.
-
-// Returns the last thread y in a mutex waiter queue such that all threads in
-// [x, y] inclusive share the same condition. Sets skip fields of some threads
-// in that range to optimize future evaluation of Skip() on x values in
-// the range. Requires thread x is in a mutex waiter queue.
-// The locking is unusual. Skip() is called under these conditions:
-// - spinlock is held in call from Enqueue(), with maybe_unlocking == false
-// - Mutex is held in call from UnlockSlow() by last unlocker, with
-// maybe_unlocking == true
-// - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
-// UnlockSlow()) and TryRemove()
-// These cases are mutually exclusive, so Skip() never runs concurrently
-// with itself on the same Mutex. The skip chain is used in these other places
-// that cannot occur concurrently:
-// - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
-// - Dequeue() (with spinlock and Mutex held)
-// - UnlockSlow() (with spinlock and Mutex held)
-// A more complex case is Enqueue()
-// - Enqueue() (with spinlock held and maybe_unlocking == false)
-// This is the first case in which Skip is called, above.
-// - Enqueue() (without spinlock held; but queue is empty and being freshly
-// formed)
-// - Enqueue() (with spinlock held and maybe_unlocking == true)
-// The first case has mutual exclusion, and the second isolation through
-// working on an otherwise unreachable data structure.
-// In the last case, Enqueue() is required to change no skip/next pointers
-// except those in the added node and the former "head" node. This implies
-// that the new node is added after head, and so must be the new head or the
-// new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
- PerThreadSynch *x0 = nullptr;
- PerThreadSynch *x1 = x;
- PerThreadSynch *x2 = x->skip;
- if (x2 != nullptr) {
- // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
- // such that x1 == x0->skip && x2 == x1->skip
- while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
- x0->skip = x2; // short-circuit skip from x0 to x2
- }
- x->skip = x1; // short-circuit skip from x to result
- }
- return x1;
-}
-
-// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
-// The latter is going to be removed out of order, because of a timeout.
-// Check whether "ancestor" has a skip field pointing to "to_be_removed",
-// and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
- if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
- if (to_be_removed->skip != nullptr) {
- ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
- } else if (ancestor->next != to_be_removed) { // they are not adjacent
- ancestor->skip = ancestor->next; // can skip one past ancestor
- } else {
- ancestor->skip = nullptr; // can't skip at all
- }
- }
-}
-
-static void CondVarEnqueue(SynchWaitParams *waitp);
-
-// Enqueue thread "waitp->thread" on a waiter queue.
-// Called with mutex spinlock held if head != nullptr
-// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
-// idempotent; it alters no state associated with the existing (empty)
-// queue.
-//
-// If waitp->cv_word == nullptr, queue the thread at either the front or
-// the end (according to its priority) of the circular mutex waiter queue whose
-// head is "head", and return the new head. mu is the previous mutex state,
-// which contains the reader count (perhaps adjusted for the operation in
-// progress) if the list was empty and a read lock held, and the holder hint if
-// the list was empty and a write lock held. (flags & kMuIsCond) indicates
-// whether this thread was transferred from a CondVar or is waiting for a
-// non-trivial condition. In this case, Enqueue() never returns nullptr
-//
-// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
-// returned. This mechanism is used by CondVar to queue a thread on the
-// condition variable queue instead of the mutex queue in implementing Wait().
-// In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
- SynchWaitParams *waitp, intptr_t mu, int flags) {
- // If we have been given a cv_word, call CondVarEnqueue() and return
- // the previous head of the Mutex waiter queue.
- if (waitp->cv_word != nullptr) {
- CondVarEnqueue(waitp);
- return head;
- }
-
- PerThreadSynch *s = waitp->thread;
- ABSL_RAW_CHECK(
- s->waitp == nullptr || // normal case
- s->waitp == waitp || // Fer()---transfer from condition variable
- s->suppress_fatal_errors,
- "detected illegal recursion into Mutex code");
- s->waitp = waitp;
- s->skip = nullptr; // maintain skip invariant (see above)
- s->may_skip = true; // always true on entering queue
- s->wake = false; // not being woken
- s->cond_waiter = ((flags & kMuIsCond) != 0);
- if (head == nullptr) { // s is the only waiter
- s->next = s; // it's the only entry in the cycle
- s->readers = mu; // reader count is from mu word
- s->maybe_unlocking = false; // no one is searching an empty list
- head = s; // s is new head
- } else {
- PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
-#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
- int64_t now_cycles = base_internal::CycleClock::Now();
- if (s->next_priority_read_cycles < now_cycles) {
- // Every so often, update our idea of the thread's priority.
- // pthread_getschedparam() is 5% of the block/wakeup time;
- // base_internal::CycleClock::Now() is 0.5%.
- int policy;
- struct sched_param param;
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
- if (err != 0) {
- ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
- } else {
- s->priority = param.sched_priority;
- s->next_priority_read_cycles =
- now_cycles +
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
- }
- }
- if (s->priority > head->priority) { // s's priority is above head's
- // try to put s in priority-fifo order, or failing that at the front.
- if (!head->maybe_unlocking) {
+// maintains the invariant provided x is not the last waiter in a Mutex queue
+// The statement
+// if (x->skip != null) { x->skip = x->skip->skip; }
+// maintains the invariant.
+
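[Editor's illustrative sketch, not part of the patch.] The invariant above means every skip edge jumps over a run of waiters that MuEquivalentWaiter() treats as interchangeable, and the two quoted statements are the only updates needed to keep that true. A self-contained toy restatement, where Node and Equivalent() are hypothetical stand-ins for PerThreadSynch and MuEquivalentWaiter():

struct Node {             // stand-in for PerThreadSynch
  Node* next = nullptr;   // circular singly-linked waiter queue
  Node* skip = nullptr;   // may jump over equivalent waiters queued later
  int klass = 0;          // stand-in for (condition, lock type, priority)
  bool may_skip = true;
};

static bool Equivalent(const Node* a, const Node* b) {
  return a->klass == b->klass;  // what MuEquivalentWaiter() decides for real
}

// First quoted statement: safe whenever x is not the last waiter in the queue.
static void MaintainAfterInsert(Node* x) {
  if (x->may_skip && Equivalent(x, x->next)) x->skip = x->next;
}

// Second quoted statement: collapsing one skip hop stays inside the run of
// equivalent waiters, so it preserves the invariant unconditionally.
static void MaintainByShortening(Node* x) {
  if (x->skip != nullptr) x->skip = x->skip->skip;
}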
+// Returns the last thread y in a mutex waiter queue such that all threads in
+// [x, y] inclusive share the same condition. Sets skip fields of some threads
+// in that range to optimize future evaluation of Skip() on x values in
+// the range. Requires thread x is in a mutex waiter queue.
+// The locking is unusual. Skip() is called under these conditions:
+// - spinlock is held in call from Enqueue(), with maybe_unlocking == false
+// - Mutex is held in call from UnlockSlow() by last unlocker, with
+// maybe_unlocking == true
+// - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
+// UnlockSlow()) and TryRemove()
+// These cases are mutually exclusive, so Skip() never runs concurrently
+// with itself on the same Mutex. The skip chain is used in these other places
+// that cannot occur concurrently:
+// - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
+// - Dequeue() (with spinlock and Mutex held)
+// - UnlockSlow() (with spinlock and Mutex held)
+// A more complex case is Enqueue()
+// - Enqueue() (with spinlock held and maybe_unlocking == false)
+// This is the first case in which Skip is called, above.
+// - Enqueue() (without spinlock held; but queue is empty and being freshly
+// formed)
+// - Enqueue() (with spinlock held and maybe_unlocking == true)
+// The first case has mutual exclusion, and the second isolation through
+// working on an otherwise unreachable data structure.
+// In the last case, Enqueue() is required to change no skip/next pointers
+// except those in the added node and the former "head" node. This implies
+// that the new node is added after head, and so must be the new head or the
+// new front of the queue.
+static PerThreadSynch *Skip(PerThreadSynch *x) {
+ PerThreadSynch *x0 = nullptr;
+ PerThreadSynch *x1 = x;
+ PerThreadSynch *x2 = x->skip;
+ if (x2 != nullptr) {
+ // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
+ // such that x1 == x0->skip && x2 == x1->skip
+ while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
+ x0->skip = x2; // short-circuit skip from x0 to x2
+ }
+ x->skip = x1; // short-circuit skip from x to result
+ }
+ return x1;
+}
+
+// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
+// The latter is going to be removed out of order, because of a timeout.
+// Check whether "ancestor" has a skip field pointing to "to_be_removed",
+// and fix it if it does.
+static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+ if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
+ if (to_be_removed->skip != nullptr) {
+ ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
+ } else if (ancestor->next != to_be_removed) { // they are not adjacent
+ ancestor->skip = ancestor->next; // can skip one past ancestor
+ } else {
+ ancestor->skip = nullptr; // can't skip at all
+ }
+ }
+}
+
+static void CondVarEnqueue(SynchWaitParams *waitp);
+
+// Enqueue thread "waitp->thread" on a waiter queue.
+// Called with mutex spinlock held if head != nullptr
+// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
+// idempotent; it alters no state associated with the existing (empty)
+// queue.
+//
+// If waitp->cv_word == nullptr, queue the thread at either the front or
+// the end (according to its priority) of the circular mutex waiter queue whose
+// head is "head", and return the new head. mu is the previous mutex state,
+// which contains the reader count (perhaps adjusted for the operation in
+// progress) if the list was empty and a read lock held, and the holder hint if
+// the list was empty and a write lock held. (flags & kMuIsCond) indicates
+// whether this thread was transferred from a CondVar or is waiting for a
+// non-trivial condition. In this case, Enqueue() never returns nullptr
+//
+// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
+// returned. This mechanism is used by CondVar to queue a thread on the
+// condition variable queue instead of the mutex queue in implementing Wait().
+// In this case, Enqueue() can return nullptr (if head==nullptr).
+static PerThreadSynch *Enqueue(PerThreadSynch *head,
+ SynchWaitParams *waitp, intptr_t mu, int flags) {
+ // If we have been given a cv_word, call CondVarEnqueue() and return
+ // the previous head of the Mutex waiter queue.
+ if (waitp->cv_word != nullptr) {
+ CondVarEnqueue(waitp);
+ return head;
+ }
+
+ PerThreadSynch *s = waitp->thread;
+ ABSL_RAW_CHECK(
+ s->waitp == nullptr || // normal case
+ s->waitp == waitp || // Fer()---transfer from condition variable
+ s->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ s->waitp = waitp;
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
+ s->cond_waiter = ((flags & kMuIsCond) != 0);
+ if (head == nullptr) { // s is the only waiter
+ s->next = s; // it's the only entry in the cycle
+ s->readers = mu; // reader count is from mu word
+ s->maybe_unlocking = false; // no one is searching an empty list
+ head = s; // s is new head
+ } else {
+ PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = base_internal::CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // base_internal::CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles +
+ static_cast<int64_t>(base_internal::CycleClock::Frequency());
+ }
+ }
+ if (s->priority > head->priority) { // s's priority is above head's
+ // try to put s in priority-fifo order, or failing that at the front.
+ if (!head->maybe_unlocking) {
// No unlocker can be scanning the queue, so we can insert into the
// middle of the queue.
//
// Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued.
- PerThreadSynch *advance_to = head; // next value of enqueue_after
- do {
- enqueue_after = advance_to;
+ PerThreadSynch *advance_to = head; // next value of enqueue_after
+ do {
+ enqueue_after = advance_to;
// (side-effect: optimizes skip chain)
advance_to = Skip(enqueue_after->next);
- } while (s->priority <= advance_to->priority);
- // termination guaranteed because s->priority > head->priority
- // and head is the end of a skip chain
- } else if (waitp->how == kExclusive &&
- Condition::GuaranteedEqual(waitp->cond, nullptr)) {
- // An unlocker could be scanning the queue, but we know it will recheck
- // the queue front for writers that have no condition, which is what s
- // is, so an insert at front is safe.
- enqueue_after = head; // add after head, at front
- }
- }
-#endif
- if (enqueue_after != nullptr) {
- s->next = enqueue_after->next;
- enqueue_after->next = s;
-
- // enqueue_after can be: head, Skip(...), or cur.
- // The first two imply enqueue_after->skip == nullptr, and
+ } while (s->priority <= advance_to->priority);
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
+ } else if (waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(waitp->cond, nullptr)) {
+ // An unlocker could be scanning the queue, but we know it will recheck
+ // the queue front for writers that have no condition, which is what s
+ // is, so an insert at front is safe.
+ enqueue_after = head; // add after head, at front
+ }
+ }
+#endif
+ if (enqueue_after != nullptr) {
+ s->next = enqueue_after->next;
+ enqueue_after->next = s;
+
+ // enqueue_after can be: head, Skip(...), or cur.
+ // The first two imply enqueue_after->skip == nullptr, and
// the last is used only if MuEquivalentWaiter(s, cur).
- // We require this because clearing enqueue_after->skip
- // is impossible; enqueue_after's predecessors might also
- // incorrectly skip over s if we were to allow other
- // insertion points.
+ // We require this because clearing enqueue_after->skip
+ // is impossible; enqueue_after's predecessors might also
+ // incorrectly skip over s if we were to allow other
+ // insertion points.
ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
MuEquivalentWaiter(enqueue_after, s),
"Mutex Enqueue failure");
-
- if (enqueue_after != head && enqueue_after->may_skip &&
+
+ if (enqueue_after != head && enqueue_after->may_skip &&
MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
- // enqueue_after can skip to its new successor, s
- enqueue_after->skip = enqueue_after->next;
- }
+ // enqueue_after can skip to its new successor, s
+ enqueue_after->skip = enqueue_after->next;
+ }
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
- s->skip = s->next; // s may skip to its successor
- }
- } else { // enqueue not done any other way, so
- // we're inserting s at the back
- // s will become new head; copy data from head into it
- s->next = head->next; // add s after head
- head->next = s;
- s->readers = head->readers; // reader count is from previous head
- s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
+ s->skip = s->next; // s may skip to its successor
+ }
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
+ // s will become new head; copy data from head into it
+ s->next = head->next; // add s after head
+ head->next = s;
+ s->readers = head->readers; // reader count is from previous head
+ s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
if (head->may_skip && MuEquivalentWaiter(head, s)) {
- // head now has successor; may skip
- head->skip = s;
- }
- head = s; // s is new head
- }
- }
- s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
- return head;
-}
-
-// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
-// whose last element is head. The new head element is returned, or null
-// if the list is made empty.
-// Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
- PerThreadSynch *w = pw->next;
- pw->next = w->next; // snip w out of list
- if (head == w) { // we removed the head
- head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
+ // head now has successor; may skip
+ head->skip = s;
+ }
+ head = s; // s is new head
+ }
+ }
+ s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
+ return head;
+}
+
+// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
+// whose last element is head. The new head element is returned, or null
+// if the list is made empty.
+// Dequeue is called with both spinlock and Mutex held.
+static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
+ PerThreadSynch *w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
+ head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
} else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
- // pw can skip to its new successor
- if (pw->next->skip !=
- nullptr) { // either skip to its successor's skip target
- pw->skip = pw->next->skip;
- } else { // or to pw's successor
- pw->skip = pw->next;
- }
- }
- return head;
-}
-
-// Traverse the elements [ pw->next, h] of the circular list whose last element
-// is head.
-// Remove all elements with wake==true and place them in the
-// singly-linked list wake_list in the order found. Assumes that
-// there is only one such element if the element has how == kExclusive.
-// Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
- PerThreadSynch *pw,
- PerThreadSynch **wake_tail) {
- PerThreadSynch *orig_h = head;
- PerThreadSynch *w = pw->next;
- bool skipped = false;
- do {
- if (w->wake) { // remove this element
- ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
- // we're removing pw's successor so either pw->skip is zero or we should
- // already have removed pw since if pw->skip!=null, pw has the same
- // condition as w.
- head = Dequeue(head, pw);
- w->next = *wake_tail; // keep list terminated
- *wake_tail = w; // add w to wake_list;
- wake_tail = &w->next; // next addition to end
- if (w->waitp->how == kExclusive) { // wake at most 1 writer
- break;
- }
- } else { // not waking this one; skip
- pw = Skip(w); // skip as much as possible
- skipped = true;
- }
- w = pw->next;
- // We want to stop processing after we've considered the original head,
- // orig_h. We can't test for w==orig_h in the loop because w may skip over
- // it; we are guaranteed only that w's predecessor will not skip over
- // orig_h. When we've considered orig_h, either we've processed it and
- // removed it (so orig_h != head), or we considered it and skipped it (so
- // skipped==true && pw == head because skipping from head always skips by
- // just one, leaving pw pointing at head). So we want to
- // continue the loop with the negation of that expression.
- } while (orig_h == head && (pw != head || !skipped));
- return head;
-}
-
-// Try to remove thread s from the list of waiters on this mutex.
-// Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+ // pw can skip to its new successor
+ if (pw->next->skip !=
+ nullptr) { // either skip to its successors skip target
+ pw->skip = pw->next->skip;
+ } else { // or to pw's successor
+ pw->skip = pw->next;
+ }
+ }
+ return head;
+}
+
+// Traverse the elements [ pw->next, h] of the circular list whose last element
+// is head.
+// Remove all elements with wake==true and place them in the
+// singly-linked list wake_list in the order found. Assumes that
+// there is only one such element if the element has how == kExclusive.
+// Return the new head.
+static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
+ PerThreadSynch *pw,
+ PerThreadSynch **wake_tail) {
+ PerThreadSynch *orig_h = head;
+ PerThreadSynch *w = pw->next;
+ bool skipped = false;
+ do {
+ if (w->wake) { // remove this element
+ ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
+ // we're removing pw's successor so either pw->skip is zero or we should
+ // already have removed pw since if pw->skip!=null, pw has the same
+ // condition as w.
+ head = Dequeue(head, pw);
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
+ if (w->waitp->how == kExclusive) { // wake at most 1 writer
+ break;
+ }
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
+ skipped = true;
+ }
+ w = pw->next;
+ // We want to stop processing after we've considered the original head,
+ // orig_h. We can't test for w==orig_h in the loop because w may skip over
+ // it; we are guaranteed only that w's predecessor will not skip over
+ // orig_h. When we've considered orig_h, either we've processed it and
+ // removed it (so orig_h != head), or we considered it and skipped it (so
+ // skipped==true && pw == head because skipping from head always skips by
+ // just one, leaving pw pointing at head). So we want to
+ // continue the loop with the negation of that expression.
+ } while (orig_h == head && (pw != head || !skipped));
+ return head;
+}
+
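
The skip-field bookkeeping above, together with the Skip() and FixSkip() helpers it relies on, is easier to see on a toy list. The sketch below is a hedged illustration rather than the real PerThreadSynch layout: waiters in the same equivalence class form a run, the first element of a run records a pointer to the last, and a search such as TryRemove() can therefore hop over the whole run in one step.

    // Toy stand-in for the waiter skip pointers; "klass" plays the role of
    // MuEquivalentWaiter() equivalence.
    #include <cassert>

    struct Waiter {
      int klass = 0;
      Waiter* next = nullptr;
      Waiter* skip = nullptr;  // last equivalent waiter reachable from here
    };

    // Follow skip pointers to the end of a run of equivalent waiters
    // (a simplified, non-path-compressing version of Skip()).
    Waiter* SkipRun(Waiter* w) {
      while (w->skip != nullptr) w = w->skip;
      return w;
    }

    int main() {
      Waiter a{1}, b{1}, c{1}, d{2};
      a.next = &b; b.next = &c; c.next = &d;
      a.skip = &c;  // a, b and c share a condition, so a may skip straight to c
      assert(SkipRun(&a) == &c);
      assert(SkipRun(&a)->next == &d);  // one hop lands on the next class
      return 0;
    }

The real queue is circular and also repairs skip pointers as elements are dequeued, which is what the skip-related checks and FixSkip() calls above are guarding.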
+// Try to remove thread s from the list of waiters on this mutex.
+// Does nothing if s is not on the waiter list.
+void Mutex::TryRemove(PerThreadSynch *s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
- intptr_t v = mu_.load(std::memory_order_relaxed);
- // acquire spinlock & lock
- if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
- mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- if (h != nullptr) {
- PerThreadSynch *pw = h; // pw is w's predecessor
- PerThreadSynch *w;
- if ((w = pw->next) != s) { // search for thread,
- do { // processing at least one element
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // acquire spinlock & lock
+ if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if (h != nullptr) {
+ PerThreadSynch *pw = h; // pw is w's predecessor
+ PerThreadSynch *w;
+ if ((w = pw->next) != s) { // search for thread,
+ do { // processing at least one element
// If the current element isn't equivalent to the waiter to be
// removed, we can skip the entire chain.
if (!MuEquivalentWaiter(s, w)) {
- pw = Skip(w); // so skip all that won't match
- // we don't have to worry about dangling skip fields
- // in the threads we skipped; none can point to s
+ pw = Skip(w); // so skip all that won't match
+ // we don't have to worry about dangling skip fields
+ // in the threads we skipped; none can point to s
// because they are in a different equivalence class.
- } else { // seeking same condition
- FixSkip(w, s); // fix up any skip pointer from w to s
- pw = w;
- }
- // don't search further if we found the thread, or we're about to
- // process the first thread again.
- } while ((w = pw->next) != s && pw != h);
- }
- if (w == s) { // found thread; remove it
- // pw->skip may be non-zero here; the loop above ensured that
- // no ancestor of s can skip to s, so removal is safe anyway.
- h = Dequeue(h, pw);
- s->next = nullptr;
- s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
- }
- }
- intptr_t nv;
- do { // release spinlock and lock
- v = mu_.load(std::memory_order_relaxed);
- nv = v & (kMuDesig | kMuEvent);
- if (h != nullptr) {
- nv |= kMuWait | reinterpret_cast<intptr_t>(h);
- h->readers = 0; // we hold writer lock
- h->maybe_unlocking = false; // finished unlocking
- }
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
- std::memory_order_relaxed));
- }
-}
-
-// Wait until thread "s", which must be the current thread, is removed from
-// this mutex's waiter queue. If "s->waitp->timeout" specifies a timeout and
-// the wait extends past that absolute time, stop waiting for a wakeup and
-// instead ensure "s" has been removed from the queue before returning.
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
- while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
- if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
- // After a timeout, we go into a spin loop until we remove ourselves
- // from the queue, or someone else removes us. We can't be sure to be
- // able to remove ourselves in a single lock acquisition because this
- // mutex may be held, and the holder has the right to read the centre
- // of the waiter queue without holding the spinlock.
- this->TryRemove(s);
- int c = 0;
- while (s->next != nullptr) {
+ } else { // seeking same condition
+ FixSkip(w, s); // fix up any skip pointer from w to s
+ pw = w;
+ }
+ // don't search further if we found the thread, or we're about to
+ // process the first thread again.
+ } while ((w = pw->next) != s && pw != h);
+ }
+ if (w == s) { // found thread; remove it
+ // pw->skip may be non-zero here; the loop above ensured that
+ // no ancestor of s can skip to s, so removal is safe anyway.
+ h = Dequeue(h, pw);
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ intptr_t nv;
+ do { // release spinlock and lock
+ v = mu_.load(std::memory_order_relaxed);
+ nv = v & (kMuDesig | kMuEvent);
+ if (h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(h);
+ h->readers = 0; // we hold writer lock
+ h->maybe_unlocking = false; // finished unlocking
+ }
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ }
+}
+
+// Wait until thread "s", which must be the current thread, is removed from
+// this mutex's waiter queue. If "s->waitp->timeout" specifies a timeout and
+// the wait extends past that absolute time, stop waiting for a wakeup and
+// instead ensure "s" has been removed from the queue before returning.
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
+ while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
+ if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
+ // After a timeout, we go into a spin loop until we remove ourselves
+ // from the queue, or someone else removes us. We can't be sure to be
+ // able to remove ourselves in a single lock acquisition because this
+ // mutex may be held, and the holder has the right to read the centre
+ // of the waiter queue without holding the spinlock.
+ this->TryRemove(s);
+ int c = 0;
+ while (s->next != nullptr) {
c = synchronization_internal::MutexDelay(c, GENTLE);
- this->TryRemove(s);
- }
- if (kDebugMode) {
- // This ensures that we test the case that TryRemove() is called when s
- // is not on the queue.
- this->TryRemove(s);
- }
- s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
- s->waitp->cond = nullptr; // condition no longer relevant for wakeups
- }
- }
- ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
- "detected illegal recursion in Mutex code");
- s->waitp = nullptr;
-}
-
-// Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
- PerThreadSynch *next = w->next;
- w->next = nullptr;
- w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
- IncrementSynchSem(this, w);
-
- return next;
-}
-
-static GraphId GetGraphIdLocked(Mutex *mu)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
- if (!deadlock_graph) { // (re)create the deadlock graph.
- deadlock_graph =
- new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
- GraphCycles;
- }
- return deadlock_graph->GetId(mu);
-}
-
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
- deadlock_graph_mu.Lock();
- GraphId id = GetGraphIdLocked(mu);
- deadlock_graph_mu.Unlock();
- return id;
-}
-
-// Record a lock acquisition. This is used in debug mode for deadlock
-// detection. The held_locks pointer points to the relevant data
-// structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
- int n = held_locks->n;
- int i = 0;
- while (i != n && held_locks->locks[i].id != id) {
- i++;
- }
- if (i == n) {
- if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
- held_locks->overflow = true; // lost some data
- } else { // we have room for lock
- held_locks->locks[i].mu = mu;
- held_locks->locks[i].count = 1;
- held_locks->locks[i].id = id;
- held_locks->n = n + 1;
- }
- } else {
- held_locks->locks[i].count++;
- }
-}
-
-// Record a lock release. Each call to LockEnter(mu, id, x) should be
-// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
-// The event is not processed (LockLeave is not called) when deadlock
-// detection is disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
- int n = held_locks->n;
- int i = 0;
- while (i != n && held_locks->locks[i].id != id) {
- i++;
- }
- if (i == n) {
- if (!held_locks->overflow) {
- // The deadlock id may have been reassigned after ForgetDeadlockInfo,
- // but in that case mu should still be present.
- i = 0;
- while (i != n && held_locks->locks[i].mu != mu) {
- i++;
- }
- if (i == n) { // mu missing means releasing unheld lock
- SynchEvent *mu_events = GetSynchEvent(mu);
- ABSL_RAW_LOG(FATAL,
-                   "thread releasing lock it does not hold: %p %s; ",
- static_cast<void *>(mu),
- mu_events == nullptr ? "" : mu_events->name);
- }
- }
- } else if (held_locks->locks[i].count == 1) {
- held_locks->n = n - 1;
- held_locks->locks[i] = held_locks->locks[n - 1];
- held_locks->locks[n - 1].id = InvalidGraphId();
- held_locks->locks[n - 1].mu =
- nullptr; // clear mu to please the leak detector.
- } else {
- assert(held_locks->locks[i].count > 0);
- held_locks->locks[i].count--;
- }
-}
-
-// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
- if (kDebugMode) {
- if (synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
- }
- }
-}
-
-// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
- if (kDebugMode) {
- if (synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- LockEnter(mu, id, Synch_GetAllLocks());
- }
- }
-}
-
-// Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
- if (kDebugMode) {
- if (synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
- }
- }
-}
-
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
- bool symbolize) {
- static const int kSymLen = 200;
- char sym[kSymLen];
- int len = 0;
- for (int i = 0; i != n; i++) {
- if (symbolize) {
- if (!symbolizer(pcs[i], sym, kSymLen)) {
- sym[0] = '\0';
- }
- snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
- (i == 0 ? "\n" : ""),
- pcs[i], sym);
- } else {
- snprintf(buf + len, maxlen - len, " %p", pcs[i]);
- }
- len += strlen(&buf[len]);
- }
- return buf;
-}
-
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
- void *pcs[40];
- return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
- maxlen, symbolize);
-}
-
-namespace {
-enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
- // a path this long would be remarkable
-// Buffers required to report a deadlock.
-// We do not allocate them on stack to avoid large stack frame.
-struct DeadlockReportBuffers {
- char buf[6100];
- GraphId path[kMaxDeadlockPathLen];
-};
-
-struct ScopedDeadlockReportBuffers {
- ScopedDeadlockReportBuffers() {
- b = reinterpret_cast<DeadlockReportBuffers *>(
- base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
- }
- ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
- DeadlockReportBuffers *b;
-};
-
-// Helper to pass to GraphCycles::UpdateStackTrace.
-int GetStack(void** stack, int max_depth) {
- return absl::GetStackTrace(stack, max_depth, 3);
-}
-} // anonymous namespace
-
-// Called in debug mode when a thread is about to acquire a lock in a way that
-// may block.
-static GraphId DeadlockCheck(Mutex *mu) {
- if (synch_deadlock_detection.load(std::memory_order_acquire) ==
- OnDeadlockCycle::kIgnore) {
- return InvalidGraphId();
- }
-
- SynchLocksHeld *all_locks = Synch_GetAllLocks();
-
- absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
- const GraphId mu_id = GetGraphIdLocked(mu);
-
- if (all_locks->n == 0) {
- // There are no other locks held. Return now so that we don't need to
- // call GetSynchEvent(). This way we do not record the stack trace
- // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
- // it can't always be the first lock acquired by a thread.
- return mu_id;
- }
-
- // We prefer to keep stack traces that show a thread holding and acquiring
- // as many locks as possible. This increases the chances that a given edge
- // in the acquires-before graph will be represented in the stack traces
- // recorded for the locks.
- deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
-
- // For each other mutex already held by this thread:
- for (int i = 0; i != all_locks->n; i++) {
- const GraphId other_node_id = all_locks->locks[i].id;
- const Mutex *other =
- static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
- if (other == nullptr) {
- // Ignore stale lock
- continue;
- }
-
- // Add the acquired-before edge to the graph.
- if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
- ScopedDeadlockReportBuffers scoped_buffers;
- DeadlockReportBuffers *b = scoped_buffers.b;
- static int number_of_reported_deadlocks = 0;
- number_of_reported_deadlocks++;
-      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
- bool symbolize = number_of_reported_deadlocks <= 2;
- ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
- CurrentStackString(b->buf, sizeof (b->buf), symbolize));
- int len = 0;
- for (int j = 0; j != all_locks->n; j++) {
- void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
- if (pr != nullptr) {
- snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
- len += static_cast<int>(strlen(&b->buf[len]));
- }
- }
+ this->TryRemove(s);
+ }
+ if (kDebugMode) {
+ // This ensures that we test the case that TryRemove() is called when s
+ // is not on the queue.
+ this->TryRemove(s);
+ }
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->cond = nullptr; // condition no longer relevant for wakeups
+ }
+ }
+ ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
+ "detected illegal recursion in Mutex code");
+ s->waitp = nullptr;
+}
+
+// Wake thread w, and return the next thread in the list.
+PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
+ PerThreadSynch *next = w->next;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+
+ return next;
+}
+
+static GraphId GetGraphIdLocked(Mutex *mu)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
+ if (!deadlock_graph) { // (re)create the deadlock graph.
+ deadlock_graph =
+ new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
+ GraphCycles;
+ }
+ return deadlock_graph->GetId(mu);
+}
+
+static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+ deadlock_graph_mu.Lock();
+ GraphId id = GetGraphIdLocked(mu);
+ deadlock_graph_mu.Unlock();
+ return id;
+}
+
+// Record a lock acquisition. This is used in debug mode for deadlock
+// detection. The held_locks pointer points to the relevant data
+// structure for each case.
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
+ held_locks->overflow = true; // lost some data
+ } else { // we have room for lock
+ held_locks->locks[i].mu = mu;
+ held_locks->locks[i].count = 1;
+ held_locks->locks[i].id = id;
+ held_locks->n = n + 1;
+ }
+ } else {
+ held_locks->locks[i].count++;
+ }
+}
+
+// Record a lock release. Each call to LockEnter(mu, id, x) should be
+// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
+// The event is not processed (LockLeave is not called) when deadlock
+// detection is disabled.
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (!held_locks->overflow) {
+ // The deadlock id may have been reassigned after ForgetDeadlockInfo,
+ // but in that case mu should still be present.
+ i = 0;
+ while (i != n && held_locks->locks[i].mu != mu) {
+ i++;
+ }
+ if (i == n) { // mu missing means releasing unheld lock
+ SynchEvent *mu_events = GetSynchEvent(mu);
+ ABSL_RAW_LOG(FATAL,
+                   "thread releasing lock it does not hold: %p %s; ",
+ static_cast<void *>(mu),
+ mu_events == nullptr ? "" : mu_events->name);
+ }
+ }
+ } else if (held_locks->locks[i].count == 1) {
+ held_locks->n = n - 1;
+ held_locks->locks[i] = held_locks->locks[n - 1];
+ held_locks->locks[n - 1].id = InvalidGraphId();
+ held_locks->locks[n - 1].mu =
+ nullptr; // clear mu to please the leak detector.
+ } else {
+ assert(held_locks->locks[i].count > 0);
+ held_locks->locks[i].count--;
+ }
+}
+
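
LockEnter() and LockLeave() above maintain a small fixed-capacity multiset of held locks: a linear scan keyed by id, a per-entry count for repeated acquisitions of the same lock, and an overflow flag once the array fills. A standalone sketch of the same bookkeeping, with hypothetical names and an illustrative capacity:

    #include <cassert>

    struct HeldLocks {
      static constexpr int kCapacity = 20;  // illustrative, not the real size
      struct Entry { const void* mu; int count; };
      Entry locks[kCapacity];
      int n = 0;
      bool overflow = false;

      void Enter(const void* mu) {
        for (int i = 0; i < n; ++i) {
          if (locks[i].mu == mu) { ++locks[i].count; return; }
        }
        if (n == kCapacity) { overflow = true; return; }  // lost some data
        locks[n++] = {mu, 1};
      }

      void Leave(const void* mu) {
        for (int i = 0; i < n; ++i) {
          if (locks[i].mu == mu) {
            if (--locks[i].count == 0) locks[i] = locks[--n];  // swap-erase
            return;
          }
        }
        // Releasing an unrecorded lock: the real code raises a fatal error
        // here unless the table has overflowed.
      }
    };

    int main() {
      HeldLocks held;
      int mu_a = 0, mu_b = 0;  // any distinct addresses will do
      held.Enter(&mu_a);
      held.Enter(&mu_a);       // re-entrant hold only bumps the count
      held.Enter(&mu_b);
      held.Leave(&mu_a);
      assert(held.n == 2);     // mu_a still counted once, mu_b once
      held.Leave(&mu_a);
      held.Leave(&mu_b);
      assert(held.n == 0 && !held.overflow);
      return 0;
    }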
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, id, Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockLeave() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockLeave(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+static char *StackString(void **pcs, int n, char *buf, int maxlen,
+ bool symbolize) {
+ static const int kSymLen = 200;
+ char sym[kSymLen];
+ int len = 0;
+ for (int i = 0; i != n; i++) {
+ if (symbolize) {
+ if (!symbolizer(pcs[i], sym, kSymLen)) {
+ sym[0] = '\0';
+ }
+ snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
+ (i == 0 ? "\n" : ""),
+ pcs[i], sym);
+ } else {
+ snprintf(buf + len, maxlen - len, " %p", pcs[i]);
+ }
+ len += strlen(&buf[len]);
+ }
+ return buf;
+}
+
+static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
+ void *pcs[40];
+ return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
+ maxlen, symbolize);
+}
+
+namespace {
+enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
+// Buffers required to report a deadlock.
+// We do not allocate them on stack to avoid large stack frame.
+struct DeadlockReportBuffers {
+ char buf[6100];
+ GraphId path[kMaxDeadlockPathLen];
+};
+
+struct ScopedDeadlockReportBuffers {
+ ScopedDeadlockReportBuffers() {
+ b = reinterpret_cast<DeadlockReportBuffers *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
+ }
+ ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
+ DeadlockReportBuffers *b;
+};
+
+// Helper to pass to GraphCycles::UpdateStackTrace.
+int GetStack(void** stack, int max_depth) {
+ return absl::GetStackTrace(stack, max_depth, 3);
+}
+} // anonymous namespace
+
+// Called in debug mode when a thread is about to acquire a lock in a way that
+// may block.
+static GraphId DeadlockCheck(Mutex *mu) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kIgnore) {
+ return InvalidGraphId();
+ }
+
+ SynchLocksHeld *all_locks = Synch_GetAllLocks();
+
+ absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
+ const GraphId mu_id = GetGraphIdLocked(mu);
+
+ if (all_locks->n == 0) {
+ // There are no other locks held. Return now so that we don't need to
+ // call GetSynchEvent(). This way we do not record the stack trace
+ // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
+ // it can't always be the first lock acquired by a thread.
+ return mu_id;
+ }
+
+ // We prefer to keep stack traces that show a thread holding and acquiring
+ // as many locks as possible. This increases the chances that a given edge
+ // in the acquires-before graph will be represented in the stack traces
+ // recorded for the locks.
+ deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
+
+ // For each other mutex already held by this thread:
+ for (int i = 0; i != all_locks->n; i++) {
+ const GraphId other_node_id = all_locks->locks[i].id;
+ const Mutex *other =
+ static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ if (other == nullptr) {
+ // Ignore stale lock
+ continue;
+ }
+
+ // Add the acquired-before edge to the graph.
+ if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
+ ScopedDeadlockReportBuffers scoped_buffers;
+ DeadlockReportBuffers *b = scoped_buffers.b;
+ static int number_of_reported_deadlocks = 0;
+ number_of_reported_deadlocks++;
+      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
+ bool symbolize = number_of_reported_deadlocks <= 2;
+ ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
+ CurrentStackString(b->buf, sizeof (b->buf), symbolize));
+ int len = 0;
+ for (int j = 0; j != all_locks->n; j++) {
+ void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
+ if (pr != nullptr) {
+ snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ len += static_cast<int>(strlen(&b->buf[len]));
+ }
+ }
ABSL_RAW_LOG(ERROR,
"Acquiring absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed",
- static_cast<void *>(mu), b->buf);
- ABSL_RAW_LOG(ERROR, "Cycle: ");
- int path_len = deadlock_graph->FindPath(
- mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
- GraphId id = b->path[j];
- Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
- if (path_mu == nullptr) continue;
- void** stack;
- int depth = deadlock_graph->GetStackTrace(id, &stack);
- snprintf(b->buf, sizeof(b->buf),
- "mutex@%p stack: ", static_cast<void *>(path_mu));
- StackString(stack, depth, b->buf + strlen(b->buf),
- static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
- symbolize);
- ABSL_RAW_LOG(ERROR, "%s", b->buf);
- }
- if (synch_deadlock_detection.load(std::memory_order_acquire) ==
- OnDeadlockCycle::kAbort) {
- deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
- ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
- return mu_id;
- }
- break; // report at most one potential deadlock per acquisition
- }
- }
-
- return mu_id;
-}
-
-// Invoke DeadlockCheck() iff we're in debug mode and
-// deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
- if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- return DeadlockCheck(mu);
- } else {
- return InvalidGraphId();
- }
-}
-
-void Mutex::ForgetDeadlockInfo() {
- if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- deadlock_graph_mu.Lock();
- if (deadlock_graph != nullptr) {
- deadlock_graph->RemoveNode(this);
- }
- deadlock_graph_mu.Unlock();
- }
-}
-
-void Mutex::AssertNotHeld() const {
- // We have the data to allow this check only if in debug mode and deadlock
- // detection is enabled.
- if (kDebugMode &&
- (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
- synch_deadlock_detection.load(std::memory_order_acquire) !=
- OnDeadlockCycle::kIgnore) {
- GraphId id = GetGraphId(const_cast<Mutex *>(this));
- SynchLocksHeld *locks = Synch_GetAllLocks();
- for (int i = 0; i != locks->n; i++) {
- if (locks->locks[i].id == id) {
- SynchEvent *mu_events = GetSynchEvent(this);
- ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
- static_cast<const void *>(this),
- (mu_events == nullptr ? "" : mu_events->name));
- }
- }
- }
-}
-
-// Attempt to acquire *mu, and return whether successful. The implementation
-// may spin for a short while if the lock cannot be acquired immediately.
-static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
+ static_cast<void *>(mu), b->buf);
+ ABSL_RAW_LOG(ERROR, "Cycle: ");
+ int path_len = deadlock_graph->FindPath(
+ mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len; j++) {
+ GraphId id = b->path[j];
+ Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ if (path_mu == nullptr) continue;
+ void** stack;
+ int depth = deadlock_graph->GetStackTrace(id, &stack);
+ snprintf(b->buf, sizeof(b->buf),
+ "mutex@%p stack: ", static_cast<void *>(path_mu));
+ StackString(stack, depth, b->buf + strlen(b->buf),
+ static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
+ symbolize);
+ ABSL_RAW_LOG(ERROR, "%s", b->buf);
+ }
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kAbort) {
+ deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
+ ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
+ return mu_id;
+ }
+ break; // report at most one potential deadlock per acquisition
+ }
+ }
+
+ return mu_id;
+}
+
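
For context, this is how the detector above is reached from application code. The sketch is hedged: it uses the public absl::Mutex API (SetMutexDeadlockDetectionMode and OnDeadlockCycle are declared in mutex.h), the mutexes and functions are made up, and the check only runs in builds where kDebugMode is true. Once both acquisition orders have been observed, InsertEdge() refuses to add the edge that would make the graph cyclic, and the "Potential Mutex deadlock" report above is logged even though the two threads never actually deadlock at run time.

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu_a, mu_b;  // hypothetical application mutexes

    void InitDetection() {
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
    }

    void ThreadOne() {
      mu_a.Lock();
      mu_b.Lock();    // DeadlockCheck() records the edge mu_a -> mu_b
      mu_b.Unlock();
      mu_a.Unlock();
    }

    void ThreadTwo() {
      mu_b.Lock();
      mu_a.Lock();    // edge mu_b -> mu_a would close a cycle: report emitted
      mu_a.Unlock();
      mu_b.Unlock();
    }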
+// Invoke DeadlockCheck() iff we're in debug mode and
+// deadlock checking has been enabled.
+static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ return DeadlockCheck(mu);
+ } else {
+ return InvalidGraphId();
+ }
+}
+
+void Mutex::ForgetDeadlockInfo() {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ deadlock_graph_mu.Lock();
+ if (deadlock_graph != nullptr) {
+ deadlock_graph->RemoveNode(this);
+ }
+ deadlock_graph_mu.Unlock();
+ }
+}
+
+void Mutex::AssertNotHeld() const {
+ // We have the data to allow this check only if in debug mode and deadlock
+ // detection is enabled.
+ if (kDebugMode &&
+ (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
+ synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ GraphId id = GetGraphId(const_cast<Mutex *>(this));
+ SynchLocksHeld *locks = Synch_GetAllLocks();
+ for (int i = 0; i != locks->n; i++) {
+ if (locks->locks[i].id == id) {
+ SynchEvent *mu_events = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
+ static_cast<const void *>(this),
+ (mu_events == nullptr ? "" : mu_events->name));
+ }
+ }
+ }
+}
+
+// Attempt to acquire *mu, and return whether successful. The implementation
+// may spin for a short while if the lock cannot be acquired immediately.
+static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = GetMutexGlobals().spinloop_iterations;
- do { // do/while somewhat faster on AMD
- intptr_t v = mu->load(std::memory_order_relaxed);
+ do { // do/while somewhat faster on AMD
+ intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader|kMuEvent)) != 0) {
return false; // a reader or tracing -> give up
- } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
- mu->compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
+ } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
+ mu->compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
return true;
- }
+ }
} while (--c > 0);
return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
- GraphId id = DebugOnlyDeadlockCheck(this);
- intptr_t v = mu_.load(std::memory_order_relaxed);
- // try fast acquire, then spin loop
- if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
- !mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- // try spin acquire, then slow loop
- if (!TryAcquireWithSpinning(&this->mu_)) {
- this->LockSlow(kExclusive, nullptr, 0);
- }
- }
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
- GraphId id = DebugOnlyDeadlockCheck(this);
- intptr_t v = mu_.load(std::memory_order_relaxed);
- // try fast acquire, then slow loop
- if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
- !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- this->LockSlow(kShared, nullptr, 0);
- }
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
-}
-
-void Mutex::LockWhen(const Condition &cond) {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
- GraphId id = DebugOnlyDeadlockCheck(this);
- this->LockSlow(kExclusive, &cond, 0);
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
-}
-
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
- GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kExclusive, &cond,
- KernelTimeout(deadline), 0);
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
- return res;
-}
-
-void Mutex::ReaderLockWhen(const Condition &cond) {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
- GraphId id = DebugOnlyDeadlockCheck(this);
- this->LockSlow(kShared, &cond, 0);
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
-}
-
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
- absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
- absl::Time deadline) {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
- GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
- DebugOnlyLockEnter(this, id);
- ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
- return res;
-}
-
-void Mutex::Await(const Condition &cond) {
- if (cond.Eval()) { // condition already true; nothing to do
- if (kDebugMode) {
- this->AssertReaderHeld();
- }
- } else { // normal case
- ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
- "condition untrue on return from Await");
- }
-}
-
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
- if (cond.Eval()) { // condition already true; nothing to do
- if (kDebugMode) {
- this->AssertReaderHeld();
- }
- return true;
- }
-
- KernelTimeout t{deadline};
- bool res = this->AwaitCommon(cond, t);
- ABSL_RAW_CHECK(res || t.has_timeout(),
- "condition untrue on return from Await");
- return res;
-}
-
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
- this->AssertReaderHeld();
- MuHow how =
- (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
- ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
- SynchWaitParams waitp(
- how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
- int flags = kMuHasBlocked;
- if (!Condition::GuaranteedEqual(&cond, nullptr)) {
- flags |= kMuIsCond;
- }
- this->UnlockSlow(&waitp);
- this->Block(waitp.thread);
- ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
- ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
- this->LockSlowLoop(&waitp, flags);
- bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
- EvalConditionAnnotated(&cond, this, true, false, how == kShared);
- ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
- return res;
-}
-
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
- ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
- intptr_t v = mu_.load(std::memory_order_relaxed);
- if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
- mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- DebugOnlyLockEnter(this);
- ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
- return true;
- }
- if ((v & kMuEvent) != 0) { // we're recording events
- if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
- mu_.compare_exchange_strong(
- v, (kExclusive->fast_or | v) + kExclusive->fast_add,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- DebugOnlyLockEnter(this);
- PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
- ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
- return true;
- } else {
- PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
- }
- }
- ABSL_TSAN_MUTEX_POST_LOCK(
- this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
- return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
- ABSL_TSAN_MUTEX_PRE_LOCK(this,
- __tsan_mutex_read_lock | __tsan_mutex_try_lock);
- intptr_t v = mu_.load(std::memory_order_relaxed);
- // The while-loops (here and below) iterate only if the mutex word keeps
- // changing (typically because the reader count changes) under the CAS. We
- // limit the number of attempts to avoid having to think about livelock.
- int loop_limit = 5;
- while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
- if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- DebugOnlyLockEnter(this);
- ABSL_TSAN_MUTEX_POST_LOCK(
- this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
- return true;
- }
- loop_limit--;
- v = mu_.load(std::memory_order_relaxed);
- }
- if ((v & kMuEvent) != 0) { // we're recording events
- loop_limit = 5;
- while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
- if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- DebugOnlyLockEnter(this);
- PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
- ABSL_TSAN_MUTEX_POST_LOCK(
- this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
- return true;
- }
- loop_limit--;
- v = mu_.load(std::memory_order_relaxed);
- }
- if ((v & kMuEvent) != 0) {
- PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
- }
- }
- ABSL_TSAN_MUTEX_POST_LOCK(this,
- __tsan_mutex_read_lock | __tsan_mutex_try_lock |
- __tsan_mutex_try_lock_failed,
- 0);
- return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
- ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
- DebugOnlyLockLeave(this);
- intptr_t v = mu_.load(std::memory_order_relaxed);
-
- if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
- ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
- static_cast<unsigned>(v));
- }
-
- // should_try_cas is whether we'll try a compare-and-swap immediately.
- // NOTE: optimized out when kDebugMode is false.
- bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
- (v & (kMuWait | kMuDesig)) != kMuWait);
-  // But we can use an alternate computation of it that compilers
-  // currently don't find on their own. When that changes, this function
-  // can be simplified.
- intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
- intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
- // Claim: "x == 0 && y > 0" is equal to should_try_cas.
- // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
- // all possible non-zero values for x exceed all possible values for y.
- // Therefore, (x == 0 && y > 0) == (x < y).
- if (kDebugMode && should_try_cas != (x < y)) {
-    // We would usually use PRIdPTR here, but it is not correctly implemented
-    // within the android toolchain.
- ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
- static_cast<long long>(v), static_cast<long long>(x),
- static_cast<long long>(y));
- }
- if (x < y &&
- mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
- std::memory_order_release,
- std::memory_order_relaxed)) {
- // fast writer release (writer with no waiters or with designated waker)
- } else {
- this->UnlockSlow(nullptr /*no waitp*/); // take slow path
- }
- ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
-}
-
-// Requires v to represent a reader-locked state.
-static bool ExactlyOneReader(intptr_t v) {
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
- assert((v & kMuHigh) != 0);
- // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
- // on some architectures the following generates slightly smaller code.
- // It may be faster too.
- constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
- return (v & kMuMultipleWaitersMask) == 0;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
- ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
- DebugOnlyLockLeave(this);
- intptr_t v = mu_.load(std::memory_order_relaxed);
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
- if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
- // fast reader release (reader with no waiters)
- intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
- std::memory_order_relaxed)) {
- ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
- return;
- }
- }
- this->UnlockSlow(nullptr /*no waitp*/); // take slow path
- ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
-}
-
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuDesig) // blocked; turn off the designated waker bit
-};
-
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuWrWait) // blocked; pretend there are no waiting writers
-};
-
-// Internal version of LockWhen(). See LockSlowWithDeadline()
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then spin loop
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ // try spin acquire, then slow loop
+ if (!TryAcquireWithSpinning(&this->mu_)) {
+ this->LockSlow(kExclusive, nullptr, 0);
+ }
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then slow loop
+ if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ this->LockSlow(kShared, nullptr, 0);
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+void Mutex::LockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kExclusive, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
+ return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
+}
+
+void Mutex::ReaderLockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kShared, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+ absl::Duration timeout) {
+ return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+ absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
+}
+
+void Mutex::Await(const Condition &cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ } else { // normal case
+ ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
+ "condition untrue on return from Await");
+ }
+}
+
+bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
+ return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{deadline};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
+}
+
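
The condition-based entry points above are easiest to read next to a caller. A hedged usage sketch follows; the Queue type and its fields are hypothetical. LockWhen() blocks until the predicate holds and returns with the lock held, while Await() and AwaitWithTimeout() temporarily release an already-held lock until the predicate holds and then reacquire it in the same mode.

    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    struct Queue {
      absl::Mutex mu;
      int pending = 0;

      void Produce() {
        absl::MutexLock l(&mu);
        ++pending;
      }

      void Consume() {
        mu.LockWhen(absl::Condition(+[](int* p) { return *p > 0; }, &pending));
        --pending;  // guaranteed: pending > 0 here, with mu held exclusively
        mu.Unlock();
      }

      bool ConsumeWithTimeout(absl::Duration timeout) {
        absl::MutexLock l(&mu);
        if (!mu.AwaitWithTimeout(
                absl::Condition(+[](int* p) { return *p > 0; }, &pending),
                timeout)) {
          return false;  // timed out; pending is still 0 and mu is still held
        }
        --pending;
        return true;
      }
    };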
+bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+ this->AssertReaderHeld();
+ MuHow how =
+ (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
+ SynchWaitParams waitp(
+ how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ int flags = kMuHasBlocked;
+ if (!Condition::GuaranteedEqual(&cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
+ this->LockSlowLoop(&waitp, flags);
+ bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ EvalConditionAnnotated(&cond, this, true, false, how == kShared);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
+ return res;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (kExclusive->fast_or | v) + kExclusive->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ } else {
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // The while-loops (here and below) iterate only if the mutex word keeps
+ // changing (typically because the reader count changes) under the CAS. We
+ // limit the number of attempts to avoid having to think about livelock.
+ int loop_limit = 5;
+ while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ loop_limit = 5;
+ while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock |
+ __tsan_mutex_try_lock_failed,
+ 0);
+ return false;
+}
+
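
A brief usage note for the two try-lock fast paths above: both return immediately, and the caller owns the lock only when the call returns true (they can fail if a writer holds the lock, if waiters are queued, or under heavy contention on the mutex word). A hedged sketch; the function and its parameters are made up:

    #include "absl/synchronization/mutex.h"

    // Returns the current counter value, or -1 if the lock was busy.
    int SampleCounter(absl::Mutex* mu, const int* counter) {
      if (mu->ReaderTryLock()) {  // never blocks
        int v = *counter;
        mu->ReaderUnlock();
        return v;
      }
      return -1;  // skip this sample rather than wait
    }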
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+
+ if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
+ ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
+ static_cast<unsigned>(v));
+ }
+
+ // should_try_cas is whether we'll try a compare-and-swap immediately.
+ // NOTE: optimized out when kDebugMode is false.
+ bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
+ (v & (kMuWait | kMuDesig)) != kMuWait);
+  // But we can use an alternate computation of it that compilers
+  // currently don't find on their own. When that changes, this function
+  // can be simplified.
+ intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
+ intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
+ // Claim: "x == 0 && y > 0" is equal to should_try_cas.
+ // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
+ // all possible non-zero values for x exceed all possible values for y.
+ // Therefore, (x == 0 && y > 0) == (x < y).
+ if (kDebugMode && should_try_cas != (x < y)) {
+    // We would usually use PRIdPTR here, but it is not correctly implemented
+    // within the android toolchain.
+ ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
+ static_cast<long long>(v), static_cast<long long>(x),
+ static_cast<long long>(y));
+ }
+ if (x < y &&
+ mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ // fast writer release (writer with no waiters or with designated waker)
+ } else {
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ }
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+}
+
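
The claim in Unlock() above, that the branch-friendly test x < y coincides with should_try_cas, can be checked exhaustively over the four flag bits involved. A minimal standalone sketch; the bit values are illustrative assumptions chosen only to satisfy the stated property that kMuWriter and kMuEvent exceed kMuDesig and kMuWait:

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::intptr_t kMuDesig = 0x02, kMuWait = 0x04;
      const std::intptr_t kMuWriter = 0x08, kMuEvent = 0x10;
      for (std::intptr_t v = 0; v < 0x20; ++v) {  // every combination of low bits
        const bool should_try_cas =
            ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
             (v & (kMuWait | kMuDesig)) != kMuWait);
        const std::intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
        const std::intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
        assert(should_try_cas == (x == 0 && y > 0));
        assert(should_try_cas == (x < y));  // the form the release path relies on
      }
      return 0;
    }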
+// Requires v to represent a reader-locked state.
+static bool ExactlyOneReader(intptr_t v) {
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & kMuHigh) != 0);
+ // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
+ // on some architectures the following generates slightly smaller code.
+ // It may be faster too.
+ constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
+ return (v & kMuMultipleWaitersMask) == 0;
+}
+
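
The mask trick in ExactlyOneReader() above can be sanity-checked the same way. The constants below are illustrative assumptions (a reader count packed into the high bits, kMuOne representing one reader); under the function's precondition that at least one reader is counted, the straightforward comparison and the smaller-code form agree:

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::intptr_t kMuOne = 0x0100;                 // one reader
      const std::intptr_t kMuHigh = ~std::intptr_t{0xFF};  // reader-count field
      for (std::intptr_t readers = 1; readers <= 4096; ++readers) {
        const std::intptr_t v = readers * kMuOne;  // low flag bits omitted
        const bool straightforward = (v & kMuHigh) == kMuOne;
        const bool smaller_code = (v & (kMuHigh ^ kMuOne)) == 0;
        assert(straightforward == smaller_code);
        assert(straightforward == (readers == 1));
      }
      return 0;
    }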
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+ return;
+ }
+ }
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+}
+
+// The zap_desig_waker bitmask is used to clear the designated waker flag in
+// the mutex if this thread has blocked, and therefore may be the designated
+// waker.
+static const intptr_t zap_desig_waker[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuDesig) // blocked; turn off the designated waker bit
+};
+
+// The ignore_waiting_writers bitmask is used to ignore the existence
+// of waiting writers if a reader that has already blocked once
+// wakes up.
+static const intptr_t ignore_waiting_writers[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuWrWait) // blocked; pretend there are no waiting writers
+};
+
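
The two arrays above implement a small branch-free idiom: index a two-element mask table with a boolean (here flags & kMuHasBlocked, which evaluates to 0 or 1), so that "v & table[index]" either leaves the mutex word untouched or clears exactly one bit, with no conditional. A hedged sketch with made-up flag values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::intptr_t kDesig = 0x2;  // illustrative bit, not the real value
      const std::intptr_t zap_desig[] = {
          ~std::intptr_t{0},             // [0] not blocked: keep every bit
          ~kDesig};                      // [1] blocked: clear kDesig
      const std::intptr_t v = 0xFF;
      const bool blocked = true;
      assert((v & zap_desig[0]) == 0xFF);                    // unchanged
      assert((v & zap_desig[blocked]) == (0xFF & ~kDesig));  // one bit cleared
      return 0;
    }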
+// Internal version of LockWhen(). See LockSlowWithDeadline()
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
int flags) {
- ABSL_RAW_CHECK(
- this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
- "condition untrue on return from LockSlow");
-}
-
-// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
- bool locking, bool trylock,
- bool read_lock) {
- // Delicate annotation dance.
- // We are currently inside of read/write lock/unlock operation.
- // All memory accesses are ignored inside of mutex operations + for unlock
- // operation tsan considers that we've already released the mutex.
- bool res = false;
+ ABSL_RAW_CHECK(
+ this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
+ "condition untrue on return from LockSlow");
+}
+
+// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ bool locking, bool trylock,
+ bool read_lock) {
+ // Delicate annotation dance.
+ // We are currently inside of read/write lock/unlock operation.
+ // All memory accesses are ignored inside of mutex operations + for unlock
+ // operation tsan considers that we've already released the mutex.
+ bool res = false;
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
- const int flags = read_lock ? __tsan_mutex_read_lock : 0;
- const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
-#endif
- if (locking) {
- // For lock we pretend that we have finished the operation,
- // evaluate the predicate, then unlock the mutex and start locking it again
- // to match the annotation at the end of outer lock operation.
- // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
- // will think the lock acquisition is recursive which will trigger
- // deadlock detector.
- ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
- res = cond->Eval();
- // There is no "try" version of Unlock, so use flags instead of tryflags.
- ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
- ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
- ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
- } else {
- // Similarly, for unlock we pretend that we have unlocked the mutex,
- // lock the mutex, evaluate the predicate, and start unlocking it again
- // to match the annotation at the end of outer unlock operation.
- ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
- ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
- ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
- res = cond->Eval();
- ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
- }
- // Prevent unused param warnings in non-TSAN builds.
- static_cast<void>(mu);
- static_cast<void>(trylock);
- static_cast<void>(read_lock);
- return res;
-}
-
-// Compute cond->Eval() hiding it from race detectors.
-// We are hiding it because inside of UnlockSlow we can evaluate a predicate
-// that was just added by a concurrent Lock operation; Lock adds the predicate
-// to the internal Mutex list without actually acquiring the Mutex
-// (it only acquires the internal spinlock, which is rightfully invisible for
-// tsan). As a result there is no tsan-visible synchronization between the
-// addition and this thread. So if we enabled race detection here,
-// it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
- // Memory accesses are already ignored inside of lock/unlock operations,
- // but synchronization operations are also ignored. When we evaluate the
- // predicate we must ignore only memory accesses but not synchronization,
- // because missed synchronization can lead to false reports later.
- // So we "divert" (which un-ignores both memory accesses and synchronization)
- // and then separately turn on ignores of memory accesses.
- ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ const int flags = read_lock ? __tsan_mutex_read_lock : 0;
+ const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
+#endif
+ if (locking) {
+ // For lock we pretend that we have finished the operation,
+ // evaluate the predicate, then unlock the mutex and start locking it again
+ // to match the annotation at the end of outer lock operation.
+ // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
+ // will think the lock acquisition is recursive which will trigger
+ // deadlock detector.
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
+ res = cond->Eval();
+ // There is no "try" version of Unlock, so use flags instead of tryflags.
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
+ } else {
+ // Similarly, for unlock we pretend that we have unlocked the mutex,
+ // lock the mutex, evaluate the predicate, and start unlocking it again
+ // to match the annotation at the end of outer unlock operation.
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
+ res = cond->Eval();
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+ }
+ // Prevent unused param warnings in non-TSAN builds.
+ static_cast<void>(mu);
+ static_cast<void>(trylock);
+ static_cast<void>(read_lock);
+ return res;
+}
+
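// A minimal usage sketch of the public API whose predicates end up in
// EvalConditionAnnotated()/EvalConditionIgnored() below: LockWhen() with an
// absl::Condition. The Queue class and its members are illustrative
// assumptions, not names used elsewhere in this file.
#include <deque>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Queue {
 public:
  void Push(int v) {
    absl::MutexLock l(&mu_);
    q_.push_back(v);
  }
  int BlockingPop() {
    // The NotEmpty() predicate is evaluated with mu_ held.
    mu_.LockWhen(absl::Condition(this, &Queue::NotEmpty));
    int v = q_.front();
    q_.pop_front();
    mu_.Unlock();
    return v;
  }

 private:
  bool NotEmpty() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return !q_.empty(); }
  absl::Mutex mu_;
  std::deque<int> q_ ABSL_GUARDED_BY(mu_);
};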
+// Compute cond->Eval() hiding it from race detectors.
+// We are hiding it because inside of UnlockSlow we can evaluate a predicate
+// that was just added by a concurrent Lock operation; Lock adds the predicate
+// to the internal Mutex list without actually acquiring the Mutex
+// (it only acquires the internal spinlock, which is rightfully invisible for
+// tsan). As a result, there is no tsan-visible synchronization between the
+// addition and this thread. So if we enabled race detection here,
+// it would race with the predicate initialization.
+static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+ // Memory accesses are already ignored inside of lock/unlock operations,
+ // but synchronization operations are also ignored. When we evaluate the
+ // predicate we must ignore only memory accesses but not synchronization,
+ // because missed synchronization can lead to false reports later.
+ // So we "divert" (which un-ignores both memory accesses and synchronization)
+ // and then separately turn on ignores of memory accesses.
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
- bool res = cond->Eval();
+ bool res = cond->Eval();
ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
- ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
- static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
- return res;
-}
-
-// Internal equivalent of *LockWhenWithDeadline(), where
-// "t" represents the absolute timeout; !t.has_timeout() means "forever".
-// "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
-// In flags, bits are ored together:
-// - kMuHasBlocked indicates that the client has already blocked on the call so
-// the designated waker bit must be cleared and waiting writers should not
-// obstruct this call
-// - kMuIsCond indicates that this is a conditional acquire (condition variable,
-// Await, LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
- KernelTimeout t, int flags) {
- intptr_t v = mu_.load(std::memory_order_relaxed);
- bool unlock = false;
- if ((v & how->fast_need_zero) == 0 && // try fast acquire
- mu_.compare_exchange_strong(
- v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
- how->fast_add,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- if (cond == nullptr ||
- EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
- return true;
- }
- unlock = true;
- }
- SynchWaitParams waitp(
- how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
- if (!Condition::GuaranteedEqual(cond, nullptr)) {
- flags |= kMuIsCond;
- }
- if (unlock) {
- this->UnlockSlow(&waitp);
- this->Block(waitp.thread);
- flags |= kMuHasBlocked;
- }
- this->LockSlowLoop(&waitp, flags);
- return waitp.cond != nullptr || // => cond known true from LockSlowLoop
- cond == nullptr ||
- EvalConditionAnnotated(cond, this, true, false, how == kShared);
-}
-
-// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
-// the printf-style argument list. The format string must be a literal.
-// Arguments after the first are not evaluated unless the condition is true.
-#define RAW_CHECK_FMT(cond, ...) \
- do { \
- if (ABSL_PREDICT_FALSE(!(cond))) { \
- ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
- } \
- } while (0)
-
-static void CheckForMutexCorruption(intptr_t v, const char* label) {
- // Test for either of two situations that should not occur in v:
- // kMuWriter and kMuReader
- // kMuWrWait and !kMuWait
- const uintptr_t w = v ^ kMuWait;
- // By flipping that bit, we can now test for:
- // kMuWriter and kMuReader in w
- // kMuWrWait and kMuWait in w
- // We've chosen these two pairs of values so that they will overlap,
- // respectively, when the word is left shifted by three. This allows us to
- // save a branch in the common (correct) case of them not being coincident.
- static_assert(kMuReader << 3 == kMuWriter, "must match");
- static_assert(kMuWait << 3 == kMuWrWait, "must match");
- if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
- RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
- "%s: Mutex corrupt: both reader and writer lock held: %p",
- label, reinterpret_cast<void *>(v));
- RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
- "%s: Mutex corrupt: waiting writer with no waiters: %p",
- label, reinterpret_cast<void *>(v));
- assert(false);
-}
-
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
+ return res;
+}
+
+// Internal equivalent of *LockWhenWithDeadline(), where
+// "t" represents the absolute timeout; !t.has_timeout() means "forever".
+// "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
+// In flags, bits are ored together:
+// - kMuHasBlocked indicates that the client has already blocked on the call so
+// the designated waker bit must be cleared and waiting writers should not
+// obstruct this call
+// - kMuIsCond indicates that this is a conditional acquire (condition variable,
+// Await, LockWhen) so contention profiling should be suppressed.
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+ KernelTimeout t, int flags) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ bool unlock = false;
+ if ((v & how->fast_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (cond == nullptr ||
+ EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
+ return true;
+ }
+ unlock = true;
+ }
+ SynchWaitParams waitp(
+ how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ if (!Condition::GuaranteedEqual(cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ if (unlock) {
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ flags |= kMuHasBlocked;
+ }
+ this->LockSlowLoop(&waitp, flags);
+ return waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ cond == nullptr ||
+ EvalConditionAnnotated(cond, this, true, false, how == kShared);
+}
+
+// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
+// the printf-style argument list. The format string must be a literal.
+// Arguments after the first are not evaluated unless the condition is true.
+#define RAW_CHECK_FMT(cond, ...) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(cond))) { \
+ ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
+ } \
+ } while (0)
+
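// A minimal usage sketch of RAW_CHECK_FMT(); "label" and "ptr" are
// illustrative parameters, not names used elsewhere in this file. The format
// string must be a literal, and the arguments after the condition are only
// evaluated (and the message only formatted) when the condition is false.
static void CheckNonNullExample(const char *label, void *ptr) {
  RAW_CHECK_FMT(ptr != nullptr, "%s: unexpected null pointer: %p", label, ptr);
}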
+static void CheckForMutexCorruption(intptr_t v, const char* label) {
+ // Test for either of two situations that should not occur in v:
+ // kMuWriter and kMuReader
+ // kMuWrWait and !kMuWait
+ const uintptr_t w = v ^ kMuWait;
+ // By flipping that bit, we can now test for:
+ // kMuWriter and kMuReader in w
+ // kMuWrWait and kMuWait in w
+ // We've chosen these two pairs of values so that they will overlap,
+ // respectively, when the word is left shifted by three. This allows us to
+ // save a branch in the common (correct) case of them not being coincident.
+ static_assert(kMuReader << 3 == kMuWriter, "must match");
+ static_assert(kMuWait << 3 == kMuWrWait, "must match");
+ if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
+ RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
+ "%s: Mutex corrupt: both reader and writer lock held: %p",
+ label, reinterpret_cast<void *>(v));
+ RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
+ "%s: Mutex corrupt: waiting writer with no waiters: %p",
+ label, reinterpret_cast<void *>(v));
+ assert(false);
+}
+
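// A small worked example of the single-branch filter above, using
// illustrative constants that satisfy the two static_asserts
// (reader << 3 == writer, wait << 3 == wr_wait); the real kMu* values are
// defined earlier in this file.
#include <cassert>
#include <cstdint>

static void CorruptionFilterSketch() {
  constexpr intptr_t kReader = 0x0001, kWait = 0x0004;
  constexpr intptr_t kWriter = kReader << 3, kWrWait = kWait << 3;
  auto suspicious = [=](intptr_t v) {
    const uintptr_t w = v ^ kWait;  // flip kWait so both bad patterns line up
    return (w & (w << 3) & (kWriter | kWrWait)) != 0;
  };
  assert(suspicious(kReader | kWriter));  // reader and writer held at once
  assert(suspicious(kWrWait));            // kMuWrWait set without kMuWait
  assert(!suspicious(kWriter));           // an ordinary writer lock passes
  assert(!suspicious(kWait | kWrWait));   // waiting writer with waiters passes
}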
+void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling;
- int c = 0;
- intptr_t v = mu_.load(std::memory_order_relaxed);
- if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
- }
- ABSL_RAW_CHECK(
- waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
- "detected illegal recursion into Mutex code");
- for (;;) {
- v = mu_.load(std::memory_order_relaxed);
- CheckForMutexCorruption(v, "Lock");
- if ((v & waitp->how->slow_need_zero) == 0) {
- if (mu_.compare_exchange_strong(
- v, (waitp->how->fast_or |
- (v & zap_desig_waker[flags & kMuHasBlocked])) +
- waitp->how->fast_add,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- if (waitp->cond == nullptr ||
- EvalConditionAnnotated(waitp->cond, this, true, false,
- waitp->how == kShared)) {
- break; // we timed out, or condition true, so return
- }
- this->UnlockSlow(waitp); // got lock but condition false
- this->Block(waitp->thread);
- flags |= kMuHasBlocked;
- c = 0;
- }
- } else { // need to access waiter list
- bool dowait = false;
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
- // This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
- intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
- kMuWait;
- ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
- if (waitp->how == kExclusive && (v & kMuReader) != 0) {
- nv |= kMuWrWait;
- }
- if (mu_.compare_exchange_strong(
- v, reinterpret_cast<intptr_t>(new_h) | nv,
- std::memory_order_release, std::memory_order_relaxed)) {
- dowait = true;
- } else { // attempted Enqueue() failed
- // zero out the waitp field set by Enqueue()
- waitp->thread->waitp = nullptr;
- }
- } else if ((v & waitp->how->slow_inc_need_zero &
- ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
- // This is a reader that needs to increment the reader count,
- // but the count is currently held in the last waiter.
- if (mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuReader,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- h->readers += kMuOne; // inc reader count in waiter
- do { // release spinlock
- v = mu_.load(std::memory_order_relaxed);
- } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
- std::memory_order_release,
- std::memory_order_relaxed));
- if (waitp->cond == nullptr ||
- EvalConditionAnnotated(waitp->cond, this, true, false,
- waitp->how == kShared)) {
- break; // we timed out, or condition true, so return
- }
- this->UnlockSlow(waitp); // got lock but condition false
- this->Block(waitp->thread);
- flags |= kMuHasBlocked;
- c = 0;
- }
- } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
- mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuWait,
- std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
- intptr_t wr_wait = 0;
- ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
- if (waitp->how == kExclusive && (v & kMuReader) != 0) {
- wr_wait = kMuWrWait; // give priority to a waiting writer
- }
- do { // release spinlock
- v = mu_.load(std::memory_order_relaxed);
- } while (!mu_.compare_exchange_weak(
- v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
- reinterpret_cast<intptr_t>(new_h),
- std::memory_order_release, std::memory_order_relaxed));
- dowait = true;
- }
- if (dowait) {
- this->Block(waitp->thread); // wait until removed from list or timeout
- flags |= kMuHasBlocked;
- c = 0;
- }
- }
- ABSL_RAW_CHECK(
- waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
- "detected illegal recursion into Mutex code");
+ int c = 0;
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ CheckForMutexCorruption(v, "Lock");
+ if ((v & waitp->how->slow_need_zero) == 0) {
+ if (mu_.compare_exchange_strong(
+ v, (waitp->how->fast_or |
+ (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ waitp->how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, false,
+ waitp->how == kShared)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else { // need to access waiter list
+ bool dowait = false;
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
+ kMuWait;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ nv |= kMuWrWait;
+ }
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | nv,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ dowait = true;
+ } else { // attempted Enqueue() failed
+ // zero out the waitp field set by Enqueue()
+ waitp->thread->waitp = nullptr;
+ }
+ } else if ((v & waitp->how->slow_inc_need_zero &
+ ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+ // This is a reader that needs to increment the reader count,
+ // but the count is currently held in the last waiter.
+ if (mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuReader,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, false,
+ waitp->how == kShared)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
+ mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuWait,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ intptr_t wr_wait = 0;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ wr_wait = kMuWrWait; // give priority to a waiting writer
+ }
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ dowait = true;
+ }
+ if (dowait) {
+ this->Block(waitp->thread); // wait until removed from list or timeout
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
// delay, then try again
c = synchronization_internal::MutexDelay(c, GENTLE);
- }
- ABSL_RAW_CHECK(
- waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
- "detected illegal recursion into Mutex code");
- if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
- SYNCH_EV_READERLOCK_RETURNING);
- }
-}
-
-// Unlock this mutex, which is held by the current thread.
-// If waitp is non-zero, it must be the wait parameters for the current thread
-// which holds the lock but is not runnable because its condition is false
-// or it is in the process of blocking on a condition variable; it must requeue
-// itself on the mutex/condvar to wait for its condition to become true.
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
+ SYNCH_EV_READERLOCK_RETURNING);
+ }
+}
+
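// The "release spinlock" loops in LockSlowLoop() above (and in UnlockSlow()
// below) share one pattern: keep re-reading the word and CAS-ing until the
// spin bit can be cleared while preserving concurrent changes to the other
// bits. A minimal standalone sketch of that pattern, with an illustrative
// spin_bit parameter:
#include <atomic>
#include <cstdint>

static void ReleaseSpinBit(std::atomic<intptr_t> &word, intptr_t spin_bit) {
  intptr_t v;
  do {  // other fields (e.g. a reader count) may change while we hold the spin bit
    v = word.load(std::memory_order_relaxed);
  } while (!word.compare_exchange_weak(v, v & ~spin_bit,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}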
+// Unlock this mutex, which is held by the current thread.
+// If waitp is non-zero, it must be the wait parameters for the current thread
+// which holds the lock but is not runnable because its condition is false
+// or it is in the process of blocking on a condition variable; it must requeue
+// itself on the mutex/condvar to wait for its condition to become true.
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling;
- intptr_t v = mu_.load(std::memory_order_relaxed);
- this->AssertReaderHeld();
- CheckForMutexCorruption(v, "Unlock");
- if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
- }
- int c = 0;
- // the waiter under consideration to wake, or zero
- PerThreadSynch *w = nullptr;
- // the predecessor to w or zero
- PerThreadSynch *pw = nullptr;
- // head of the list searched previously, or zero
- PerThreadSynch *old_h = nullptr;
- // a condition that's known to be false.
- const Condition *known_false = nullptr;
- PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
- intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
- // later writer could have acquired the lock
- // (starvation avoidance)
- ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
- waitp->thread->suppress_fatal_errors,
- "detected illegal recursion into Mutex code");
- // This loop finds the threads to wake (wake_list), if any, and removes them from
- // the list of waiters. In addition, it places waitp.thread on the queue of
- // waiters if waitp is non-zero.
- for (;;) {
- v = mu_.load(std::memory_order_relaxed);
- if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
- waitp == nullptr) {
- // fast writer release (writer with no waiters or with designated waker)
- if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
- std::memory_order_release,
- std::memory_order_relaxed)) {
- return;
- }
- } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
- // fast reader release (reader with no waiters)
- intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
- std::memory_order_relaxed)) {
- return;
- }
- } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
- mu_.compare_exchange_strong(v, v | kMuSpin,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- if ((v & kMuWait) == 0) { // no one to wake
- intptr_t nv;
- bool do_enqueue = true; // always Enqueue() the first time
- ABSL_RAW_CHECK(waitp != nullptr,
- "UnlockSlow is confused"); // about to sleep
- do { // must loop to release spinlock as reader count may change
- v = mu_.load(std::memory_order_relaxed);
- // decrement reader count if there are readers
- intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
- PerThreadSynch *new_h = nullptr;
- if (do_enqueue) {
- // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
- // we must not retry here. The initial attempt will always have
- // succeeded, further attempts would enqueue us against *this due to
- // Fer() handling.
- do_enqueue = (waitp->cv_word == nullptr);
- new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
- }
- intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
- if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
- clear = kMuWrWait | kMuReader; // clear read bit
- }
- nv = (v & kMuLow & ~clear & ~kMuSpin);
- if (new_h != nullptr) {
- nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
- } else { // new_h could be nullptr if we queued ourselves on a
- // CondVar
- // In that case, we must place the reader count back in the mutex
- // word, as Enqueue() did not store it in the new waiter.
- nv |= new_readers & kMuHigh;
- }
- // release spinlock & our lock; retry if reader-count changed
- // (writer count cannot change since we hold lock)
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
- std::memory_order_relaxed));
- break;
- }
-
- // There are waiters.
- // Set h to the head of the circular waiter list.
- PerThreadSynch *h = GetPerThreadSynch(v);
- if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
- // a reader but not the last
- h->readers -= kMuOne; // release our lock
- intptr_t nv = v; // normally just release spinlock
- if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
- ABSL_RAW_CHECK(new_h != nullptr,
- "waiters disappeared during Enqueue()!");
- nv &= kMuLow;
- nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
- }
- mu_.store(nv, std::memory_order_release); // release spinlock
- // can release with a store because there were waiters
- break;
- }
-
- // Either we didn't search before, or we marked the queue
- // as "maybe_unlocking" and no one else should have changed it.
- ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
- "Mutex queue changed beneath us");
-
- // The lock is becoming free, and there's a waiter
- if (old_h != nullptr &&
- !old_h->may_skip) { // we used old_h as a terminator
- old_h->may_skip = true; // allow old_h to skip once more
- ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ this->AssertReaderHeld();
+ CheckForMutexCorruption(v, "Unlock");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ }
+ int c = 0;
+ // the waiter under consideration to wake, or zero
+ PerThreadSynch *w = nullptr;
+ // the predecessor to w or zero
+ PerThreadSynch *pw = nullptr;
+ // head of the list searched previously, or zero
+ PerThreadSynch *old_h = nullptr;
+ // a condition that's known to be false.
+ const Condition *known_false = nullptr;
+ PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
+ ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
+ waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ // This loop finds the threads to wake (wake_list), if any, and removes them from
+ // the list of waiters. In addition, it places waitp.thread on the queue of
+ // waiters if waitp is non-zero.
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
+ waitp == nullptr) {
+ // fast writer release (writer with no waiters or with designated waker)
+ if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
+ mu_.compare_exchange_strong(v, v | kMuSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if ((v & kMuWait) == 0) { // no one to wake
+ intptr_t nv;
+ bool do_enqueue = true; // always Enqueue() the first time
+ ABSL_RAW_CHECK(waitp != nullptr,
+ "UnlockSlow is confused"); // about to sleep
+ do { // must loop to release spinlock as reader count may change
+ v = mu_.load(std::memory_order_relaxed);
+ // decrement reader count if there are readers
+ intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
+ PerThreadSynch *new_h = nullptr;
+ if (do_enqueue) {
+ // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
+ // we must not retry here. The initial attempt will always have
+ // succeeded, further attempts would enqueue us against *this due to
+ // Fer() handling.
+ do_enqueue = (waitp->cv_word == nullptr);
+ new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
+ }
+ intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
+ if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
+ clear = kMuWrWait | kMuReader; // clear read bit
+ }
+ nv = (v & kMuLow & ~clear & ~kMuSpin);
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } else { // new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ // In that case, we must place the reader count back in the mutex
+ // word, as Enqueue() did not store it in the new waiter.
+ nv |= new_readers & kMuHigh;
+ }
+ // release spinlock & our lock; retry if reader-count changed
+ // (writer count cannot change since we hold lock)
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ break;
+ }
+
+ // There are waiters.
+ // Set h to the head of the circular waiter list.
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
+ // a reader but not the last
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
+ if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "waiters disappeared during Enqueue()!");
+ nv &= kMuLow;
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ }
+ mu_.store(nv, std::memory_order_release); // release spinlock
+ // can release with a store because there were waiters
+ break;
+ }
+
+ // Either we didn't search before, or we marked the queue
+ // as "maybe_unlocking" and no one else should have changed it.
+ ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
+ "Mutex queue changed beneath us");
+
+ // The lock is becoming free, and there's a waiter
+ if (old_h != nullptr &&
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
+ ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
- old_h->skip = old_h->next; // old_h not head & can skip to successor
- }
- }
- if (h->next->waitp->how == kExclusive &&
- Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
- // easy case: writer with no condition; no need to search
- pw = h; // wake w, the successor of h (=pw)
- w = h->next;
- w->wake = true;
- // We are waking up a writer. This writer may be racing against
- // an already awake reader for the lock. We want the
- // writer to usually win this race,
- // because if it doesn't, we can potentially keep taking a reader
- // perpetually and writers will starve. Worse than
- // that, this can also starve other readers if kMuWrWait gets set
- // later.
- wr_wait = kMuWrWait;
- } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
- // we found a waiter w to wake on a previous iteration and either it's
- // a writer, or we've searched the entire list so we have all the
- // readers.
- if (pw == nullptr) { // if w's predecessor is unknown, it must be h
- pw = h;
- }
- } else {
- // At this point we don't know all the waiters to wake, and the first
- // waiter has a condition or is a reader. We avoid searching over
- // waiters we've searched on previous iterations by starting at
- // old_h if it's set. If old_h==h, there's no one to wakeup at all.
- if (old_h == h) { // we've searched before, and nothing's new
- // so there's no one to wake.
- intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
- h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
- if (waitp != nullptr) { // we must queue ourselves and sleep
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
- nv &= kMuLow;
- if (new_h != nullptr) {
- nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
- } // else new_h could be nullptr if we queued ourselves on a
- // CondVar
- }
- // release spinlock & lock
- // can release with a store because there were waiters
- mu_.store(nv, std::memory_order_release);
- break;
- }
-
- // set up to walk the list
- PerThreadSynch *w_walk; // current waiter during list walk
- PerThreadSynch *pw_walk; // previous waiter during list walk
- if (old_h != nullptr) { // we've searched up to old_h before
- pw_walk = old_h;
- w_walk = old_h->next;
- } else { // no prior search, start at beginning
- pw_walk =
- nullptr; // h->next's predecessor may change; don't record it
- w_walk = h->next;
- }
-
- h->may_skip = false; // ensure we never skip past h in future searches
- // even if other waiters are queued after it.
- ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
-
- h->maybe_unlocking = true; // we're about to scan the waiter list
- // without the spinlock held.
- // Enqueue must be conservative about
- // priority queuing.
-
- // We must release the spinlock to evaluate the conditions.
- mu_.store(v, std::memory_order_release); // release just spinlock
- // can release with a store because there were waiters
-
- // h is the last waiter queued, and w_walk the first unsearched waiter.
- // Without the spinlock, the locations mu_ and h->next may now change
- // underneath us, but since we hold the lock itself, the only legal
- // change is to add waiters between h and w_walk. Therefore, it's safe
- // to walk the path from w_walk to h inclusive. (TryRemove() can remove
- // a waiter anywhere, but it acquires both the spinlock and the Mutex)
-
- old_h = h; // remember we searched to here
-
- // Walk the path up to and including h looking for waiters we can wake.
- while (pw_walk != h) {
- w_walk->wake = false;
- if (w_walk->waitp->cond ==
- nullptr || // no condition => vacuously true OR
- (w_walk->waitp->cond != known_false &&
- // this thread's condition is not known false, AND
- // is in fact true
- EvalConditionIgnored(this, w_walk->waitp->cond))) {
- if (w == nullptr) {
- w_walk->wake = true; // can wake this waiter
- w = w_walk;
- pw = pw_walk;
- if (w_walk->waitp->how == kExclusive) {
- wr_wait = kMuWrWait;
- break; // bail if waking this writer
- }
- } else if (w_walk->waitp->how == kShared) { // wake if a reader
- w_walk->wake = true;
- } else { // writer with true condition
- wr_wait = kMuWrWait;
- }
- } else { // can't wake; condition false
- known_false = w_walk->waitp->cond; // remember last false condition
- }
- if (w_walk->wake) { // we're waking reader w_walk
- pw_walk = w_walk; // don't skip similar waiters
- } else { // not waking; skip as much as possible
- pw_walk = Skip(w_walk);
- }
- // If pw_walk == h, then load of pw_walk->next can race with
- // concurrent write in Enqueue(). However, at the same time
- // we do not need to do the load, because we will bail out
- // from the loop anyway.
- if (pw_walk != h) {
- w_walk = pw_walk->next;
- }
- }
-
- continue; // restart for(;;)-loop to wakeup w or to find more waiters
- }
- ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
- // The first (and perhaps only) waiter we've chosen to wake is w, whose
- // predecessor is pw. If w is a reader, we must wake all the other
- // waiters with wake==true as well. We may also need to queue
- // ourselves if waitp != null. The spinlock and the lock are still
- // held.
-
- // This traverses the list in [ pw->next, h ], where h is the head,
- // removing all elements with wake==true and placing them in the
- // singly-linked list wake_list. Returns the new head.
- h = DequeueAllWakeable(h, pw, &wake_list);
-
- intptr_t nv = (v & kMuEvent) | kMuDesig;
- // assume no waiters left,
- // set kMuDesig for INV1a
-
- if (waitp != nullptr) { // we must queue ourselves and sleep
- h = Enqueue(h, waitp, v, kMuIsCond);
- // h is new last waiter; could be null if we queued ourselves on a
- // CondVar
- }
-
- ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
- "unexpected empty wake list");
-
- if (h != nullptr) { // there are waiters left
- h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
- nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
- }
-
- // release both spinlock & lock
- // can release with a store because there were waiters
- mu_.store(nv, std::memory_order_release);
- break; // out of for(;;)-loop
- }
+ old_h->skip = old_h->next; // old_h not head & can skip to successor
+ }
+ }
+ if (h->next->waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
+ // easy case: writer with no condition; no need to search
+ pw = h; // wake w, the successor of h (=pw)
+ w = h->next;
+ w->wake = true;
+ // We are waking up a writer. This writer may be racing against
+ // an already awake reader for the lock. We want the
+ // writer to usually win this race,
+ // because if it doesn't, we can potentially keep taking a reader
+ // perpetually and writers will starve. Worse than
+ // that, this can also starve other readers if kMuWrWait gets set
+ // later.
+ wr_wait = kMuWrWait;
+ } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
+ // we found a waiter w to wake on a previous iteration and either it's
+ // a writer, or we've searched the entire list so we have all the
+ // readers.
+ if (pw == nullptr) { // if w's predecessor is unknown, it must be h
+ pw = h;
+ }
+ } else {
+ // At this point we don't know all the waiters to wake, and the first
+ // waiter has a condition or is a reader. We avoid searching over
+ // waiters we've searched on previous iterations by starting at
+ // old_h if it's set. If old_h==h, there's no one to wakeup at all.
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ nv &= kMuLow;
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } // else new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ }
+ // release spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break;
+ }
+
+ // set up to walk the list
+ PerThreadSynch *w_walk; // current waiter during list walk
+ PerThreadSynch *pw_walk; // previous waiter during list walk
+ if (old_h != nullptr) { // we've searched up to old_h before
+ pw_walk = old_h;
+ w_walk = old_h->next;
+ } else { // no prior search, start at beginning
+ pw_walk =
+ nullptr; // h->next's predecessor may change; don't record it
+ w_walk = h->next;
+ }
+
+ h->may_skip = false; // ensure we never skip past h in future searches
+ // even if other waiters are queued after it.
+ ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
+
+ h->maybe_unlocking = true; // we're about to scan the waiter list
+ // without the spinlock held.
+ // Enqueue must be conservative about
+ // priority queuing.
+
+ // We must release the spinlock to evaluate the conditions.
+ mu_.store(v, std::memory_order_release); // release just spinlock
+ // can release with a store because there were waiters
+
+ // h is the last waiter queued, and w_walk the first unsearched waiter.
+ // Without the spinlock, the locations mu_ and h->next may now change
+ // underneath us, but since we hold the lock itself, the only legal
+ // change is to add waiters between h and w_walk. Therefore, it's safe
+ // to walk the path from w_walk to h inclusive. (TryRemove() can remove
+ // a waiter anywhere, but it acquires both the spinlock and the Mutex)
+
+ old_h = h; // remember we searched to here
+
+ // Walk the path up to and including h looking for waiters we can wake.
+ while (pw_walk != h) {
+ w_walk->wake = false;
+ if (w_walk->waitp->cond ==
+ nullptr || // no condition => vacuously true OR
+ (w_walk->waitp->cond != known_false &&
+ // this thread's condition is not known false, AND
+ // is in fact true
+ EvalConditionIgnored(this, w_walk->waitp->cond))) {
+ if (w == nullptr) {
+ w_walk->wake = true; // can wake this waiter
+ w = w_walk;
+ pw = pw_walk;
+ if (w_walk->waitp->how == kExclusive) {
+ wr_wait = kMuWrWait;
+ break; // bail if waking this writer
+ }
+ } else if (w_walk->waitp->how == kShared) { // wake if a reader
+ w_walk->wake = true;
+ } else { // writer with true condition
+ wr_wait = kMuWrWait;
+ }
+ } else { // can't wake; condition false
+ known_false = w_walk->waitp->cond; // remember last false condition
+ }
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
+ pw_walk = Skip(w_walk);
+ }
+ // If pw_walk == h, then load of pw_walk->next can race with
+ // concurrent write in Enqueue(). However, at the same time
+ // we do not need to do the load, because we will bail out
+ // from the loop anyway.
+ if (pw_walk != h) {
+ w_walk = pw_walk->next;
+ }
+ }
+
+ continue; // restart for(;;)-loop to wakeup w or to find more waiters
+ }
+ ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
+ // The first (and perhaps only) waiter we've chosen to wake is w, whose
+ // predecessor is pw. If w is a reader, we must wake all the other
+ // waiters with wake==true as well. We may also need to queue
+ // ourselves if waitp != null. The spinlock and the lock are still
+ // held.
+
+ // This traverses the list in [ pw->next, h ], where h is the head,
+ // removing all elements with wake==true and placing them in the
+ // singly-linked list wake_list. Returns the new head.
+ h = DequeueAllWakeable(h, pw, &wake_list);
+
+ intptr_t nv = (v & kMuEvent) | kMuDesig;
+ // assume no waiters left,
+ // set kMuDesig for INV1a
+
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ h = Enqueue(h, waitp, v, kMuIsCond);
+ // h is new last waiter; could be null if we queued ourselves on a
+ // CondVar
+ }
+
+ ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
+ "unexpected empty wake list");
+
+ if (h != nullptr) { // there are waiters left
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
+ }
+
+ // release both spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break; // out of for(;;)-loop
+ }
// aggressive here; no one can proceed till we do
c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
- } // end of for(;;)-loop
-
- if (wake_list != kPerThreadSynchNull) {
- int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
- bool cond_waiter = wake_list->cond_waiter;
- do {
- wake_list = Wakeup(wake_list); // wake waiters
- } while (wake_list != kPerThreadSynchNull);
- if (!cond_waiter) {
- // Sample lock contention events only if the (first) waiter was trying to
- // acquire the lock, not waiting on a condition variable or Condition.
+ } // end of for(;;)-loop
+
+ if (wake_list != kPerThreadSynchNull) {
+ int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
+ bool cond_waiter = wake_list->cond_waiter;
+ do {
+ wake_list = Wakeup(wake_list); // wake waiters
+ } while (wake_list != kPerThreadSynchNull);
+ if (!cond_waiter) {
+ // Sample lock contention events only if the (first) waiter was trying to
+ // acquire the lock, not waiting on a condition variable or Condition.
int64_t wait_cycles =
base_internal::CycleClock::Now() - enqueue_timestamp;
- mutex_tracer("slow release", this, wait_cycles);
- ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- submit_profile_data(enqueue_timestamp);
- ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
- }
- }
-}
-
-// Used by CondVar implementation to reacquire mutex after waking from
-// condition variable. This routine is used instead of Lock() because the
-// waiting thread may have been moved from the condition variable queue to the
-// mutex queue without a wakeup, by Trans(). In that case, when the thread is
-// finally woken, the woken thread will believe it has been woken from the
-// condition variable (i.e. its PC will be in the CondVar code), when
-// in fact it has just been woken from the mutex. Thus, it must enter the slow
-// path of the mutex in the same state as if it had just woken from the mutex.
-// That is, it must ensure to clear kMuDesig (INV1b).
-void Mutex::Trans(MuHow how) {
- this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
-}
-
-// Used by CondVar implementation to effectively wake thread w from the
-// condition variable. If this mutex is free, we simply wake the thread.
-// It will later acquire the mutex with high probability. Otherwise, we
-// enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+ mutex_tracer("slow release", this, wait_cycles);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ submit_profile_data(enqueue_timestamp);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ }
+ }
+}
+
+// Used by CondVar implementation to reacquire mutex after waking from
+// condition variable. This routine is used instead of Lock() because the
+// waiting thread may have been moved from the condition variable queue to the
+// mutex queue without a wakeup, by Trans(). In that case, when the thread is
+// finally woken, the woken thread will believe it has been woken from the
+// condition variable (i.e. its PC will be in the CondVar code), when
+// in fact it has just been woken from the mutex. Thus, it must enter the slow
+// path of the mutex in the same state as if it had just woken from the mutex.
+// That is, it must ensure to clear kMuDesig (INV1b).
+void Mutex::Trans(MuHow how) {
+ this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
+}
+
+// Used by CondVar implementation to effectively wake thread w from the
+// condition variable. If this mutex is free, we simply wake the thread.
+// It will later acquire the mutex with high probability. Otherwise, we
+// enqueue thread w on this mutex.
+void Mutex::Fer(PerThreadSynch *w) {
SchedulingGuard::ScopedDisable disable_rescheduling;
- int c = 0;
- ABSL_RAW_CHECK(w->waitp->cond == nullptr,
- "Mutex::Fer while waiting on Condition");
- ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
- "Mutex::Fer while in timed wait");
- ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
- "Mutex::Fer with pending CondVar queueing");
- for (;;) {
- intptr_t v = mu_.load(std::memory_order_relaxed);
- // Note: must not queue if the mutex is unlocked (nobody will wake it).
- // For example, we can have only kMuWait (conditional) or maybe
- // kMuWait|kMuWrWait.
- // conflicting != 0 implies that the waking thread cannot currently take
- // the mutex, which in turn implies that someone else has it and can wake
- // us if we queue.
- const intptr_t conflicting =
- kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
- if ((v & conflicting) == 0) {
- w->next = nullptr;
- w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
- IncrementSynchSem(this, w);
- return;
- } else {
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
- // This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
- ABSL_RAW_CHECK(new_h != nullptr,
- "Enqueue failed"); // we must queue ourselves
- if (mu_.compare_exchange_strong(
- v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
- std::memory_order_release, std::memory_order_relaxed)) {
- return;
- }
- } else if ((v & kMuSpin) == 0 &&
- mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
- ABSL_RAW_CHECK(new_h != nullptr,
- "Enqueue failed"); // we must queue ourselves
- do {
- v = mu_.load(std::memory_order_relaxed);
- } while (!mu_.compare_exchange_weak(
- v,
- (v & kMuLow & ~kMuSpin) | kMuWait |
- reinterpret_cast<intptr_t>(new_h),
- std::memory_order_release, std::memory_order_relaxed));
- return;
- }
- }
+ int c = 0;
+ ABSL_RAW_CHECK(w->waitp->cond == nullptr,
+ "Mutex::Fer while waiting on Condition");
+ ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
+ "Mutex::Fer while in timed wait");
+ ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
+ "Mutex::Fer with pending CondVar queueing");
+ for (;;) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // Note: must not queue if the mutex is unlocked (nobody will wake it).
+ // For example, we can have only kMuWait (conditional) or maybe
+ // kMuWait|kMuWrWait.
+ // conflicting != 0 implies that the waking thread cannot currently take
+ // the mutex, which in turn implies that someone else has it and can wake
+ // us if we queue.
+ const intptr_t conflicting =
+ kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
+ if ((v & conflicting) == 0) {
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+ return;
+ } else {
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ do {
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v,
+ (v & kMuLow & ~kMuSpin) | kMuWait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ return;
+ }
+ }
c = synchronization_internal::MutexDelay(c, GENTLE);
- }
-}
-
-void Mutex::AssertHeld() const {
- if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
- static_cast<const void *>(this),
- (e == nullptr ? "" : e->name));
- }
-}
-
-void Mutex::AssertReaderHeld() const {
- if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- ABSL_RAW_LOG(
- FATAL, "thread should hold at least a read lock on Mutex %p %s",
- static_cast<const void *>(this), (e == nullptr ? "" : e->name));
- }
-}
-
-// -------------------------------- condition variables
-static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
-static const intptr_t kCvEvent = 0x0002L; // record events
-
-static const intptr_t kCvLow = 0x0003L; // low order bits of CV
-
-// Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
-
-static_assert(PerThreadSynch::kAlignment > kCvLow,
- "PerThreadSynch::kAlignment must be greater than kCvLow");
-
-void CondVar::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
- e->log = true;
- UnrefSynchEvent(e);
-}
-
-CondVar::~CondVar() {
- if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
- ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
- }
-}
-
-
-// Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+ }
+}
+
+void Mutex::AssertHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
+ static_cast<const void *>(this),
+ (e == nullptr ? "" : e->name));
+ }
+}
+
+void Mutex::AssertReaderHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(
+ FATAL, "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ }
+}
+
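// A minimal usage sketch of AssertHeld()/AssertReaderHeld() above; the
// Counter class and its members are illustrative assumptions. The checks
// raw-log FATAL when the mutex word shows that no thread holds the required
// lock, backing up the static thread-safety annotations.
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  int Get() const ABSL_SHARED_LOCKS_REQUIRED(mu_) {
    mu_.AssertReaderHeld();  // caller must hold at least a read lock
    return value_;
  }
  void Increment() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    mu_.AssertHeld();  // caller must hold the write lock
    ++value_;
  }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};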
+// -------------------------------- condition variables
+static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
+static const intptr_t kCvEvent = 0x0002L; // record events
+
+static const intptr_t kCvLow = 0x0003L; // low order bits of CV
+
+// Hack to make constant values available to gdb pretty printer
+enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+
+static_assert(PerThreadSynch::kAlignment > kCvLow,
+ "PerThreadSynch::kAlignment must be greater than kCvLow");
+
+void CondVar::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+CondVar::~CondVar() {
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
+ }
+}
+
+
+// Remove thread s from the list of waiters on this condition variable.
+void CondVar::Remove(PerThreadSynch *s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
- intptr_t v;
- int c = 0;
- for (v = cv_.load(std::memory_order_relaxed);;
- v = cv_.load(std::memory_order_relaxed)) {
- if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- if (h != nullptr) {
- PerThreadSynch *w = h;
- while (w->next != s && w->next != h) { // search for thread
- w = w->next;
- }
- if (w->next == s) { // found thread; remove it
- w->next = s->next;
- if (h == s) {
- h = (w == s) ? nullptr : w;
- }
- s->next = nullptr;
- s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
- }
- }
- // release spinlock
- cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
- std::memory_order_release);
- return;
- } else {
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed);;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w = h;
+ while (w->next != s && w->next != h) { // search for thread
+ w = w->next;
+ }
+ if (w->next == s) { // found thread; remove it
+ w->next = s->next;
+ if (h == s) {
+ h = (w == s) ? nullptr : w;
+ }
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ return;
+ } else {
// try again after a delay
c = synchronization_internal::MutexDelay(c, GENTLE);
- }
- }
-}
-
-// Queue thread waitp->thread on condition variable word cv_word using
-// wait parameters waitp.
-// We split this into a separate routine, rather than simply doing it as part
-// of WaitCommon(). If we were to queue ourselves on the condition variable
-// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
-// the logging code, or via a Condition function) and might potentially attempt
-// to block this thread. That would be a problem if the thread were already on
-// the condition variable waiter queue. Thus, we use the waitp->cv_word
-// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
-// condition variable queue just before the mutex is to be unlocked, and (most
-// importantly) after any call to an external routine that might re-enter the
-// mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
- // This thread might be transferred to the Mutex queue by Fer() when
- // we are woken. To make sure that is what happens, Enqueue() doesn't
- // call CondVarEnqueue() again but instead uses its normal code. We
- // must do this before we queue ourselves so that cv_word will be null
- // when seen by the dequeuer, who may wish immediately to requeue
- // this thread on another queue.
- std::atomic<intptr_t> *cv_word = waitp->cv_word;
- waitp->cv_word = nullptr;
-
- intptr_t v = cv_word->load(std::memory_order_relaxed);
- int c = 0;
- while ((v & kCvSpin) != 0 || // acquire spinlock
- !cv_word->compare_exchange_weak(v, v | kCvSpin,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
+ }
+ }
+}
+
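// A standalone sketch (with a hypothetical Node type) of the circular
// singly-linked-list removal performed by CondVar::Remove() above: walk the
// ring from the head h to find s's predecessor, splice s out, and fix up h
// when s was the head (and possibly the only element).
struct Node {
  Node *next;
};

static Node *RemoveFromRing(Node *h, Node *s) {
  if (h == nullptr) return nullptr;
  Node *w = h;
  while (w->next != s && w->next != h) {  // stop at s's predecessor, or after a full lap
    w = w->next;
  }
  if (w->next == s) {              // s is on the ring: splice it out
    w->next = s->next;
    if (h == s) {                  // s was the head ...
      h = (w == s) ? nullptr : w;  // ... and possibly the only element
    }
    s->next = nullptr;
  }
  return h;
}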
+// Queue thread waitp->thread on condition variable word cv_word using
+// wait parameters waitp.
+// We split this into a separate routine, rather than simply doing it as part
+// of WaitCommon(). If we were to queue ourselves on the condition variable
+// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
+// the logging code, or via a Condition function) and might potentially attempt
+// to block this thread. That would be a problem if the thread were already on
+// the condition variable waiter queue. Thus, we use the waitp->cv_word
+// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
+// condition variable queue just before the mutex is to be unlocked, and (most
+// importantly) after any call to an external routine that might re-enter the
+// mutex code.
+static void CondVarEnqueue(SynchWaitParams *waitp) {
+ // This thread might be transferred to the Mutex queue by Fer() when
+ // we are woken. To make sure that is what happens, Enqueue() doesn't
+ // call CondVarEnqueue() again but instead uses its normal code. We
+ // must do this before we queue ourselves so that cv_word will be null
+ // when seen by the dequeuer, who may wish immediately to requeue
+ // this thread on another queue.
+ std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ waitp->cv_word = nullptr;
+
+ intptr_t v = cv_word->load(std::memory_order_relaxed);
+ int c = 0;
+ while ((v & kCvSpin) != 0 || // acquire spinlock
+ !cv_word->compare_exchange_weak(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
c = synchronization_internal::MutexDelay(c, GENTLE);
- v = cv_word->load(std::memory_order_relaxed);
- }
- ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
- waitp->thread->waitp = waitp; // prepare ourselves for waiting
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- if (h == nullptr) { // add this thread to waiter list
- waitp->thread->next = waitp->thread;
- } else {
- waitp->thread->next = h->next;
- h->next = waitp->thread;
- }
- waitp->thread->state.store(PerThreadSynch::kQueued,
- std::memory_order_relaxed);
- cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
- std::memory_order_release);
-}
-
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
- bool rc = false; // return value; true iff we timed out
-
- intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
- Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
- ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
-
- // maybe trace this call
- intptr_t v = cv_.load(std::memory_order_relaxed);
- cond_var_tracer("Wait", this);
- if ((v & kCvEvent) != 0) {
- PostSynchEvent(this, SYNCH_EV_WAIT);
- }
-
- // Release mu and wait on condition variable.
- SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
- Synch_GetPerThreadAnnotated(mutex), &cv_);
- // UnlockSlow() will call CondVarEnqueue() just before releasing the
- // Mutex, thus queuing this thread on the condition variable. See
- // CondVarEnqueue() for the reasons.
- mutex->UnlockSlow(&waitp);
-
- // wait for signal
- while (waitp.thread->state.load(std::memory_order_acquire) ==
- PerThreadSynch::kQueued) {
- if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
- this->Remove(waitp.thread);
- rc = true;
- }
- }
-
- ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
- waitp.thread->waitp = nullptr; // cleanup
-
- // maybe trace this call
- cond_var_tracer("Unwait", this);
- if ((v & kCvEvent) != 0) {
- PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
- }
-
- // From a synchronization point of view, Wait is an unlock of the mutex followed
- // by a lock of the mutex. We've annotated the start of the unlock at the beginning
- // of the function. Now, finish unlock and annotate lock of the mutex.
- // (Trans is effectively lock).
- ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
- ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
- mutex->Trans(mutex_how); // Reacquire mutex
- ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
- return rc;
-}
-
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
-}
-
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
- return WaitCommon(mu, KernelTimeout(deadline));
-}
-
-void CondVar::Wait(Mutex *mu) {
- WaitCommon(mu, KernelTimeout::Never());
-}
-
-// Wake thread w
-// If it was a timed wait, w will be waiting on w->cv
-// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
-// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
- if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
- // The waiting thread only needs to observe "w->state == kAvailable" to be
- // released, so we must cache "cvmu" before clearing "next".
- Mutex *mu = w->waitp->cvmu;
- w->next = nullptr;
- w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
- Mutex::IncrementSynchSem(mu, w);
- } else {
- w->waitp->cvmu->Fer(w);
- }
-}
-
-void CondVar::Signal() {
+ v = cv_word->load(std::memory_order_relaxed);
+ }
+ ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h == nullptr) { // add this thread to waiter list
+ waitp->thread->next = waitp->thread;
+ } else {
+ waitp->thread->next = h->next;
+ h->next = waitp->thread;
+ }
+ waitp->thread->state.store(PerThreadSynch::kQueued,
+ std::memory_order_relaxed);
+ cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
+ std::memory_order_release);
+}
+
+bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
+ bool rc = false; // return value; true iff we timed out
+
+ intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
+ Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
+
+ // maybe trace this call
+ intptr_t v = cv_.load(std::memory_order_relaxed);
+ cond_var_tracer("Wait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT);
+ }
+
+ // Release mu and wait on condition variable.
+ SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
+ Synch_GetPerThreadAnnotated(mutex), &cv_);
+ // UnlockSlow() will call CondVarEnqueue() just before releasing the
+ // Mutex, thus queuing this thread on the condition variable. See
+ // CondVarEnqueue() for the reasons.
+ mutex->UnlockSlow(&waitp);
+
+ // wait for signal
+ while (waitp.thread->state.load(std::memory_order_acquire) ==
+ PerThreadSynch::kQueued) {
+ if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+ this->Remove(waitp.thread);
+ rc = true;
+ }
+ }
+
+ ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
+ waitp.thread->waitp = nullptr; // cleanup
+
+ // maybe trace this call
+ cond_var_tracer("Unwait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
+ }
+
+ // From a synchronization point of view, Wait is an unlock of the mutex
+ // followed by a lock of the mutex. We've annotated the start of the unlock
+ // at the beginning of the function. Now, finish the unlock and annotate the
+ // lock of the mutex. (Trans is effectively a lock.)
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
+ mutex->Trans(mutex_how); // Reacquire mutex
+ ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
+ return rc;
+}
+
+bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
+ return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+}
+
+bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+ return WaitCommon(mu, KernelTimeout(deadline));
+}
+
+void CondVar::Wait(Mutex *mu) {
+ WaitCommon(mu, KernelTimeout::Never());
+}
+
+// Wake thread w
+// If it was a timed wait, w will be waiting on w->cv
+// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
+// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
+void CondVar::Wakeup(PerThreadSynch *w) {
+ if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
+ // The waiting thread only needs to observe "w->state == kAvailable" to be
+ // released, so we must cache "cvmu" before clearing "next".
+ Mutex *mu = w->waitp->cvmu;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ Mutex::IncrementSynchSem(mu, w);
+ } else {
+ w->waitp->cvmu->Fer(w);
+ }
+}
+
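The Wait()/Wakeup() path above and the Signal()/SignalAll() paths below implement the usual condition-variable contract: Wait() atomically releases the associated Mutex while the thread blocks and reacquires it before returning. A minimal usage sketch, assuming a flag `ready` guarded by `mu` (all names are illustrative):

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    absl::CondVar cv;
    bool ready = false;  // guarded by mu

    void WaitForReady() {
      absl::MutexLock lock(&mu);
      while (!ready) {  // re-check: wakeups may be spurious
        cv.Wait(&mu);   // releases mu while blocked, reacquires before returning
      }
    }

    void Publish() {
      absl::MutexLock lock(&mu);
      ready = true;
      cv.Signal();  // wakes one waiter; SignalAll() would wake every waiter
    }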
+void CondVar::Signal() {
SchedulingGuard::ScopedDisable disable_rescheduling;
- ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
- intptr_t v;
- int c = 0;
- for (v = cv_.load(std::memory_order_relaxed); v != 0;
- v = cv_.load(std::memory_order_relaxed)) {
- if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- PerThreadSynch *w = nullptr;
- if (h != nullptr) { // remove first waiter
- w = h->next;
- if (w == h) {
- h = nullptr;
- } else {
- h->next = w->next;
- }
- }
- // release spinlock
- cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
- std::memory_order_release);
- if (w != nullptr) {
- CondVar::Wakeup(w); // wake waiter, if there was one
- cond_var_tracer("Signal wakeup", this);
- }
- if ((v & kCvEvent) != 0) {
- PostSynchEvent(this, SYNCH_EV_SIGNAL);
- }
- ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
- return;
- } else {
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch *w = nullptr;
+ if (h != nullptr) { // remove first waiter
+ w = h->next;
+ if (w == h) {
+ h = nullptr;
+ } else {
+ h->next = w->next;
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ if (w != nullptr) {
+ CondVar::Wakeup(w); // wake waiter, if there was one
+ cond_var_tracer("Signal wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNAL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+ return;
+ } else {
c = synchronization_internal::MutexDelay(c, GENTLE);
- }
- }
- ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-}
-
-void CondVar::SignalAll() {
- ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
- intptr_t v;
- int c = 0;
- for (v = cv_.load(std::memory_order_relaxed); v != 0;
- v = cv_.load(std::memory_order_relaxed)) {
- // empty the list if spinlock free
- // We do this by simply setting the list to empty using
- // compare and swap. We then have the entire list in our hands,
- // which cannot be changing since we grabbed it while no one
- // held the lock.
- if ((v & kCvSpin) == 0 &&
- cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
- std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- if (h != nullptr) {
- PerThreadSynch *w;
- PerThreadSynch *n = h->next;
- do { // for every thread, wake it up
- w = n;
- n = n->next;
- CondVar::Wakeup(w);
- } while (w != h);
- cond_var_tracer("SignalAll wakeup", this);
- }
- if ((v & kCvEvent) != 0) {
- PostSynchEvent(this, SYNCH_EV_SIGNALALL);
- }
- ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
- return;
- } else {
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+}
+
+void CondVar::SignalAll() {
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ // empty the list if spinlock free
+ // We do this by simply setting the list to empty using
+ // compare and swap. We then have the entire list in our hands,
+ // which cannot be changing since we grabbed it while no one
+ // held the lock.
+ if ((v & kCvSpin) == 0 &&
+ cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w;
+ PerThreadSynch *n = h->next;
+ do { // for every thread, wake it up
+ w = n;
+ n = n->next;
+ CondVar::Wakeup(w);
+ } while (w != h);
+ cond_var_tracer("SignalAll wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNALALL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+ return;
+ } else {
// try again after a delay
c = synchronization_internal::MutexDelay(c, GENTLE);
- }
- }
- ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-}
-
-void ReleasableMutexLock::Release() {
- ABSL_RAW_CHECK(this->mu_ != nullptr,
- "ReleasableMutexLock::Release may only be called once");
- this->mu_->Unlock();
- this->mu_ = nullptr;
-}
-
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+}
+
+void ReleasableMutexLock::Release() {
+ ABSL_RAW_CHECK(this->mu_ != nullptr,
+ "ReleasableMutexLock::Release may only be called once");
+ this->mu_->Unlock();
+ this->mu_ = nullptr;
+}
+
#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
-#else
-#define __tsan_read1(addr) // do nothing if TSan not enabled
-#endif
-
-// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
- // ThreadSanitizer does not instrument this file for memory accesses.
- // This function dereferences a user variable that can participate
- // in a data race, so we need to manually tell TSan about this memory access.
- __tsan_read1(arg);
- return *(static_cast<bool *>(arg));
-}
-
-Condition::Condition() {} // null constructor, used for kTrue only
-const Condition Condition::kTrue;
-
-Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- function_(func),
- method_(nullptr),
- arg_(arg) {}
-
-bool Condition::CallVoidPtrFunction(const Condition *c) {
- return (*c->function_)(c->arg_);
-}
-
-Condition::Condition(const bool *cond)
- : eval_(CallVoidPtrFunction),
- function_(Dereference),
- method_(nullptr),
- // const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {}
-
-bool Condition::Eval() const {
- // eval_ == null for kTrue
- return (this->eval_ == nullptr) || (*this->eval_)(this);
-}
-
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
- if (a == nullptr) {
- return b == nullptr || b->eval_ == nullptr;
- }
- if (b == nullptr || b->eval_ == nullptr) {
- return a->eval_ == nullptr;
- }
- return a->eval_ == b->eval_ && a->function_ == b->function_ &&
- a->arg_ == b->arg_ && a->method_ == b->method_;
-}
-
+extern "C" void __tsan_read1(void *addr);
+#else
+#define __tsan_read1(addr) // do nothing if TSan not enabled
+#endif
+
+// A function that just returns its argument, dereferenced
+static bool Dereference(void *arg) {
+ // ThreadSanitizer does not instrument this file for memory accesses.
+ // This function dereferences a user variable that can participate
+ // in a data race, so we need to manually tell TSan about this memory access.
+ __tsan_read1(arg);
+ return *(static_cast<bool *>(arg));
+}
+
+Condition::Condition() {} // null constructor, used for kTrue only
+const Condition Condition::kTrue;
+
+Condition::Condition(bool (*func)(void *), void *arg)
+ : eval_(&CallVoidPtrFunction),
+ function_(func),
+ method_(nullptr),
+ arg_(arg) {}
+
+bool Condition::CallVoidPtrFunction(const Condition *c) {
+ return (*c->function_)(c->arg_);
+}
+
+Condition::Condition(const bool *cond)
+ : eval_(CallVoidPtrFunction),
+ function_(Dereference),
+ method_(nullptr),
+ // const_cast is safe since Dereference does not modify arg
+ arg_(const_cast<bool *>(cond)) {}
+
+bool Condition::Eval() const {
+ // eval_ == null for kTrue
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
+}
+
+bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+ if (a == nullptr) {
+ return b == nullptr || b->eval_ == nullptr;
+ }
+ if (b == nullptr || b->eval_ == nullptr) {
+ return a->eval_ == nullptr;
+ }
+ return a->eval_ == b->eval_ && a->function_ == b->function_ &&
+ a->arg_ == b->arg_ && a->method_ == b->method_;
+}
+
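The two `Condition` constructors above are typically passed to `Mutex::Await()` or `Mutex::LockWhen()`. A minimal sketch, assuming a `std::vector<int> items` and a `bool done`, both guarded by `mu` (all names are illustrative):

    #include <vector>
    #include "absl/synchronization/mutex.h"

    static bool HasItems(void *arg) {  // pure function of state guarded by mu
      return !static_cast<std::vector<int> *>(arg)->empty();
    }

    absl::Mutex mu;
    std::vector<int> items;  // guarded by mu
    bool done = false;       // guarded by mu

    void PopOne() {
      mu.LockWhen(absl::Condition(HasItems, &items));  // function-pointer form
      items.pop_back();
      mu.Unlock();
    }

    void BlockUntilDone() {
      absl::MutexLock lock(&mu);
      mu.Await(absl::Condition(&done));  // const bool* form (uses Dereference above)
    }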
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
index 7d4c410505..38338f24df 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/mutex.h
@@ -1,531 +1,531 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// mutex.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
-// most common type of synchronization primitive for facilitating locks on
-// shared resources. A mutex is used to prevent multiple threads from accessing
-// and/or writing to a shared resource concurrently.
-//
-// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
-// features:
-// * Conditional predicates intrinsic to the `Mutex` object
-// * Shared/reader locks, in addition to standard exclusive/writer locks
-// * Deadlock detection and debug support.
-//
-// The following helper classes are also defined within this file:
-//
-// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
-// write access within the current scope.
-//
-// ReaderMutexLock
-// - An RAII wrapper to acquire and release a `Mutex` for shared/read
-// access within the current scope.
-//
-// WriterMutexLock
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// mutex.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
+// most common type of synchronization primitive for facilitating locks on
+// shared resources. A mutex is used to prevent multiple threads from accessing
+// and/or writing to a shared resource concurrently.
+//
+// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
+// features:
+// * Conditional predicates intrinsic to the `Mutex` object
+// * Shared/reader locks, in addition to standard exclusive/writer locks
+// * Deadlock detection and debug support.
+//
+// The following helper classes are also defined within this file:
+//
+// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
+// write access within the current scope.
+//
+// ReaderMutexLock
+// - An RAII wrapper to acquire and release a `Mutex` for shared/read
+// access within the current scope.
+//
+// WriterMutexLock
// - Effectively an alias for `MutexLock` above, designed for use in
// distinguishing reader and writer locks within code.
-//
-// In addition to simple mutex locks, this file also defines ways to perform
-// locking under certain conditions.
-//
+//
+// In addition to simple mutex locks, this file also defines ways to perform
+// locking under certain conditions.
+//
// Condition - (Preferred) Used to wait for a particular predicate that
// depends on state protected by the `Mutex` to become true.
// CondVar - A lower-level variant of `Condition` that relies on
// application code to explicitly signal the `CondVar` when
// a condition has been met.
-//
-// See below for more information on using `Condition` or `CondVar`.
-//
-// Mutexes and mutex behavior can be quite complicated. The information within
-// this header file is limited, as a result. Please consult the Mutex guide for
-// more complete information and examples.
-
-#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
-#define ABSL_SYNCHRONIZATION_MUTEX_H_
-
-#include <atomic>
-#include <cstdint>
-#include <string>
-
-#include "absl/base/const_init.h"
-#include "absl/base/internal/identity.h"
-#include "absl/base/internal/low_level_alloc.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/port.h"
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-#include "absl/time/time.h"
-
-namespace absl {
+//
+// See below for more information on using `Condition` or `CondVar`.
+//
+// Mutexes and mutex behavior can be quite complicated. The information within
+// this header file is limited, as a result. Please consult the Mutex guide for
+// more complete information and examples.
+
+#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
+#define ABSL_SYNCHRONIZATION_MUTEX_H_
+
+#include <atomic>
+#include <cstdint>
+#include <string>
+
+#include "absl/base/const_init.h"
+#include "absl/base/internal/identity.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/time/time.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-class Condition;
-struct SynchWaitParams;
-
-// -----------------------------------------------------------------------------
-// Mutex
-// -----------------------------------------------------------------------------
-//
-// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
-// on some resource, typically a variable or data structure with associated
-// invariants. Proper usage of mutexes prevents concurrent access by different
-// threads to the same resource.
-//
-// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
-// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
-// Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
-// locks, as this is the most common way to use a mutex.
-//
-// The `Mutex` state machine for basic lock/unlock operations is quite simple:
-//
-// | | Lock() | Unlock() |
-// |----------------+------------+----------|
-// | Free | Exclusive | invalid |
-// | Exclusive | blocks | Free |
-//
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
-//
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
-// crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
-//
-// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
-// is, however, approximately fair over long periods, and starvation-free for
-// threads at the same priority.
-//
-// The lock/unlock primitives are now annotated with lock annotations
-// defined in (base/thread_annotations.h). When writing multi-threaded code,
-// you should use lock annotations whenever possible to document your lock
-// synchronization policy. Besides acting as documentation, these annotations
-// also help compilers or static analysis tools to identify and warn about
-// issues that could potentially result in race conditions and deadlocks.
-//
-// For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
-//
-// See also `MutexLock`, below, for scoped `Mutex` acquisition.
-
-class ABSL_LOCKABLE Mutex {
- public:
- // Creates a `Mutex` that is not held by anyone. This constructor is
- // typically used for Mutexes allocated on the heap or the stack.
- //
- // To create `Mutex` instances with static storage duration
- // (e.g. a namespace-scoped or global variable), see
- // `Mutex::Mutex(absl::kConstInit)` below instead.
- Mutex();
-
- // Creates a mutex with static storage duration. A global variable
- // constructed this way avoids the lifetime issues that can occur on program
- // startup and shutdown. (See absl/base/const_init.h.)
- //
- // For Mutexes allocated on the heap and stack, instead use the default
- // constructor, which can interact more fully with the thread sanitizer.
- //
- // Example usage:
- // namespace foo {
+
+class Condition;
+struct SynchWaitParams;
+
+// -----------------------------------------------------------------------------
+// Mutex
+// -----------------------------------------------------------------------------
+//
+// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
+// on some resource, typically a variable or data structure with associated
+// invariants. Proper usage of mutexes prevents concurrent access by different
+// threads to the same resource.
+//
+// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
+// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
+// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// Mutex. During the span of time between the Lock() and Unlock() operations,
+// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// locks, as this is the most common way to use a mutex.
+//
+// The `Mutex` state machine for basic lock/unlock operations is quite simple:
+//
+// | | Lock() | Unlock() |
+// |----------------+------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks | Free |
+//
+// Attempts to `Unlock()` must originate from the thread that performed the
+// corresponding `Lock()` operation.
+//
+// An "invalid" operation is disallowed by the API. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including but not limited to
+// crashing with a useful error message, silently succeeding, or corrupting
+// data structures. In debug mode, the implementation attempts to crash with a
+// useful error message.
+//
+// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
+// is, however, approximately fair over long periods, and starvation-free for
+// threads at the same priority.
+//
+// The lock/unlock primitives are now annotated with lock annotations
+// defined in (base/thread_annotations.h). When writing multi-threaded code,
+// you should use lock annotations whenever possible to document your lock
+// synchronization policy. Besides acting as documentation, these annotations
+// also help compilers or static analysis tools to identify and warn about
+// issues that could potentially result in race conditions and deadlocks.
+//
+// For more information about the lock annotations, please see
+// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
+// in the Clang documentation.
+//
+// See also `MutexLock`, below, for scoped `Mutex` acquisition.
+
+class ABSL_LOCKABLE Mutex {
+ public:
+ // Creates a `Mutex` that is not held by anyone. This constructor is
+ // typically used for Mutexes allocated on the heap or the stack.
+ //
+ // To create `Mutex` instances with static storage duration
+ // (e.g. a namespace-scoped or global variable), see
+ // `Mutex::Mutex(absl::kConstInit)` below instead.
+ Mutex();
+
+ // Creates a mutex with static storage duration. A global variable
+ // constructed this way avoids the lifetime issues that can occur on program
+ // startup and shutdown. (See absl/base/const_init.h.)
+ //
+ // For Mutexes allocated on the heap and stack, instead use the default
+ // constructor, which can interact more fully with the thread sanitizer.
+ //
+ // Example usage:
+ // namespace foo {
// ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
- // }
- explicit constexpr Mutex(absl::ConstInitType);
-
- ~Mutex();
-
- // Mutex::Lock()
- //
- // Blocks the calling thread, if necessary, until this `Mutex` is free, and
- // then acquires it exclusively. (This lock is also known as a "write lock.")
- void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
-
- // Mutex::Unlock()
- //
- // Releases this `Mutex` and returns it from the exclusive/write state to the
+ // }
+ explicit constexpr Mutex(absl::ConstInitType);
+
+ ~Mutex();
+
+ // Mutex::Lock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is free, and
+ // then acquires it exclusively. (This lock is also known as a "write lock.")
+ void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
+
+ // Mutex::Unlock()
+ //
+ // Releases this `Mutex` and returns it from the exclusive/write state to the
// free state. Calling thread must hold the `Mutex` exclusively.
- void Unlock() ABSL_UNLOCK_FUNCTION();
-
- // Mutex::TryLock()
- //
- // If the mutex can be acquired without blocking, does so exclusively and
- // returns `true`. Otherwise, returns `false`. Returns `true` with high
- // probability if the `Mutex` was free.
- bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
-
- // Mutex::AssertHeld()
- //
- // Return immediately if this thread holds the `Mutex` exclusively (in write
- // mode). Otherwise, may report an error (typically by crashing with a
- // diagnostic), or may return immediately.
- void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
-
- // ---------------------------------------------------------------------------
- // Reader-Writer Locking
- // ---------------------------------------------------------------------------
-
- // A Mutex can also be used as a starvation-free reader-writer lock.
- // Neither read-locks nor write-locks are reentrant/recursive to avoid
- // potential client programming errors.
- //
- // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
- // `Unlock()` and `TryLock()` methods for use within applications mixing
- // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
- // manner can make locking behavior clearer when mixing read and write modes.
- //
- // Introducing reader locks necessarily complicates the `Mutex` state
- // machine somewhat. The table below illustrates the allowed state transitions
- // of a mutex in such cases. Note that ReaderLock() may block even if the lock
- // is held in shared mode; this occurs when another thread is blocked on a
- // call to WriterLock().
- //
- // ---------------------------------------------------------------------------
- // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock()
- // ---------------------------------------------------------------------------
- // State
- // ---------------------------------------------------------------------------
- // Free Exclusive invalid Shared(1) invalid
- // Shared(1) blocks invalid Shared(2) or blocks Free
- // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1)
- // Exclusive blocks Free blocks invalid
- // ---------------------------------------------------------------------------
- //
- // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
-
- // Mutex::ReaderLock()
- //
- // Blocks the calling thread, if necessary, until this `Mutex` is either free,
- // or in shared mode, and then acquires a share of it. Note that
- // `ReaderLock()` will block if some other thread has an exclusive/writer lock
- // on the mutex.
-
- void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
-
- // Mutex::ReaderUnlock()
- //
- // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
- // the free state if this thread holds the last reader lock on the mutex. Note
- // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
- void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
-
- // Mutex::ReaderTryLock()
- //
- // If the mutex can be acquired without blocking, acquires this mutex for
- // shared access and returns `true`. Otherwise, returns `false`. Returns
- // `true` with high probability if the `Mutex` was free or shared.
- bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
-
- // Mutex::AssertReaderHeld()
- //
- // Returns immediately if this thread holds the `Mutex` in at least shared
- // mode (read mode). Otherwise, may report an error (typically by
- // crashing with a diagnostic), or may return immediately.
- void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
-
- // Mutex::WriterLock()
- // Mutex::WriterUnlock()
- // Mutex::WriterTryLock()
- //
- // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
- //
- // These methods may be used (along with the complementary `Reader*()`
- // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
- // etc.) from reader/writer lock usage.
- void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
-
- void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
-
- bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
- return this->TryLock();
- }
-
- // ---------------------------------------------------------------------------
- // Conditional Critical Regions
- // ---------------------------------------------------------------------------
-
- // Conditional usage of a `Mutex` can occur using two distinct paradigms:
- //
- // * Use of `Mutex` member functions with `Condition` objects.
- // * Use of the separate `CondVar` abstraction.
- //
- // In general, prefer use of `Condition` and the `Mutex` member functions
- // listed below over `CondVar`. When there are multiple threads waiting on
- // distinctly different conditions, however, a battery of `CondVar`s may be
- // more efficient. This section discusses use of `Condition` objects.
- //
- // `Mutex` contains member functions for performing lock operations only under
- // certain conditions, of class `Condition`. For correctness, the `Condition`
- // must return a boolean that is a pure function, only of state protected by
- // the `Mutex`. The condition must be invariant w.r.t. environmental state
- // such as thread, cpu id, or time, and must be `noexcept`. The condition will
- // always be invoked with the mutex held in at least read mode, so you should
- // not block it for long periods or sleep it on a timer.
- //
- // Since a condition must not depend directly on the current time, use
- // `*WithTimeout()` member function variants to make your condition
- // effectively true after a given duration, or `*WithDeadline()` variants to
- // make your condition effectively true after a given time.
- //
- // The condition function should have no side-effects aside from debug
- // logging; as a special exception, the function may acquire other mutexes
- // provided it releases all those that it acquires. (This exception was
- // required to allow logging.)
-
- // Mutex::Await()
- //
- // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
- // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
- // same mode in which it was previously held. If the condition is initially
- // `true`, `Await()` *may* skip the release/re-acquire step.
- //
- // `Await()` requires that this thread holds this `Mutex` in some mode.
- void Await(const Condition &cond);
-
- // Mutex::LockWhen()
- // Mutex::ReaderLockWhen()
- // Mutex::WriterLockWhen()
- //
- // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
- // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
- // logically equivalent to `*Lock(); Await();` though they may have different
- // performance characteristics.
- void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
-
- void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
-
- void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
- this->LockWhen(cond);
- }
-
- // ---------------------------------------------------------------------------
- // Mutex Variants with Timeouts/Deadlines
- // ---------------------------------------------------------------------------
-
- // Mutex::AwaitWithTimeout()
- // Mutex::AwaitWithDeadline()
- //
+ void Unlock() ABSL_UNLOCK_FUNCTION();
+
+ // Mutex::TryLock()
+ //
+ // If the mutex can be acquired without blocking, does so exclusively and
+ // returns `true`. Otherwise, returns `false`. Returns `true` with high
+ // probability if the `Mutex` was free.
+ bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertHeld()
+ //
+ // Return immediately if this thread holds the `Mutex` exclusively (in write
+ // mode). Otherwise, may report an error (typically by crashing with a
+ // diagnostic), or may return immediately.
+ void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
+
+ // ---------------------------------------------------------------------------
+ // Reader-Writer Locking
+ // ---------------------------------------------------------------------------
+
+ // A Mutex can also be used as a starvation-free reader-writer lock.
+ // Neither read-locks nor write-locks are reentrant/recursive to avoid
+ // potential client programming errors.
+ //
+ // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
+ // `Unlock()` and `TryLock()` methods for use within applications mixing
+ // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
+ // manner can make locking behavior clearer when mixing read and write modes.
+ //
+ // Introducing reader locks necessarily complicates the `Mutex` state
+ // machine somewhat. The table below illustrates the allowed state transitions
+ // of a mutex in such cases. Note that ReaderLock() may block even if the lock
+ // is held in shared mode; this occurs when another thread is blocked on a
+ // call to WriterLock().
+ //
+ // ---------------------------------------------------------------------------
+ // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock()
+ // ---------------------------------------------------------------------------
+ // State
+ // ---------------------------------------------------------------------------
+ // Free Exclusive invalid Shared(1) invalid
+ // Shared(1) blocks invalid Shared(2) or blocks Free
+ // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1)
+ // Exclusive blocks Free blocks invalid
+ // ---------------------------------------------------------------------------
+ //
+ // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
+
+ // Mutex::ReaderLock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is either free,
+ // or in shared mode, and then acquires a share of it. Note that
+ // `ReaderLock()` will block if some other thread has an exclusive/writer lock
+ // on the mutex.
+
+ void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
+
+ // Mutex::ReaderUnlock()
+ //
+ // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
+ // the free state if this thread holds the last reader lock on the mutex. Note
+ // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
+ void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
+
+ // Mutex::ReaderTryLock()
+ //
+ // If the mutex can be acquired without blocking, acquires this mutex for
+ // shared access and returns `true`. Otherwise, returns `false`. Returns
+ // `true` with high probability if the `Mutex` was free or shared.
+ bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertReaderHeld()
+ //
+ // Returns immediately if this thread holds the `Mutex` in at least shared
+ // mode (read mode). Otherwise, may report an error (typically by
+ // crashing with a diagnostic), or may return immediately.
+ void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
+
+ // Mutex::WriterLock()
+ // Mutex::WriterUnlock()
+ // Mutex::WriterTryLock()
+ //
+ // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
+ //
+ // These methods may be used (along with the complementary `Reader*()`
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
+ // etc.) from reader/writer lock usage.
+ void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
+
+ void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
+
+ bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ return this->TryLock();
+ }
+
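A minimal reader/writer sketch for the operations documented above, assuming a `std::map<int, int> table` guarded by `mu` and the RAII wrappers declared later in this header (all names are illustrative):

    #include <map>
    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    std::map<int, int> table;  // guarded by mu

    int Lookup(int key) {
      absl::ReaderMutexLock lock(&mu);  // shared mode: many readers may hold it at once
      auto it = table.find(key);
      return it == table.end() ? 0 : it->second;
    }

    void Store(int key, int value) {
      absl::WriterMutexLock lock(&mu);  // exclusive mode: blocks readers and writers
      table[key] = value;
    }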
+ // ---------------------------------------------------------------------------
+ // Conditional Critical Regions
+ // ---------------------------------------------------------------------------
+
+ // Conditional usage of a `Mutex` can occur using two distinct paradigms:
+ //
+ // * Use of `Mutex` member functions with `Condition` objects.
+ // * Use of the separate `CondVar` abstraction.
+ //
+ // In general, prefer use of `Condition` and the `Mutex` member functions
+ // listed below over `CondVar`. When there are multiple threads waiting on
+ // distinctly different conditions, however, a battery of `CondVar`s may be
+ // more efficient. This section discusses use of `Condition` objects.
+ //
+ // `Mutex` contains member functions for performing lock operations only under
+ // certain conditions, of class `Condition`. For correctness, the `Condition`
+ // must return a boolean that is a pure function, only of state protected by
+ // the `Mutex`. The condition must be invariant w.r.t. environmental state
+ // such as thread, cpu id, or time, and must be `noexcept`. The condition will
+ // always be invoked with the mutex held in at least read mode, so you should
+ // not block it for long periods or sleep it on a timer.
+ //
+ // Since a condition must not depend directly on the current time, use
+ // `*WithTimeout()` member function variants to make your condition
+ // effectively true after a given duration, or `*WithDeadline()` variants to
+ // make your condition effectively true after a given time.
+ //
+ // The condition function should have no side-effects aside from debug
+ // logging; as a special exception, the function may acquire other mutexes
+ // provided it releases all those that it acquires. (This exception was
+ // required to allow logging.)
+
+ // Mutex::Await()
+ //
+ // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
+ // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
+ // same mode in which it was previously held. If the condition is initially
+ // `true`, `Await()` *may* skip the release/re-acquire step.
+ //
+ // `Await()` requires that this thread holds this `Mutex` in some mode.
+ void Await(const Condition &cond);
+
+ // Mutex::LockWhen()
+ // Mutex::ReaderLockWhen()
+ // Mutex::WriterLockWhen()
+ //
+ // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
+ // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
+ // logically equivalent to `*Lock(); Await();` though they may have different
+ // performance characteristics.
+ void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+
+ void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+
+ void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ this->LockWhen(cond);
+ }
+
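A sketch of the `Condition`-based pattern described above, assuming the member-function `Condition` constructor declared further down in this header (the class and all names are illustrative):

    #include <vector>
    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class JobQueue {
     public:
      void Add(int job) {
        absl::MutexLock lock(&mu_);
        jobs_.push_back(job);
      }

      int TakeOne() {
        mu_.LockWhen(absl::Condition(this, &JobQueue::HasJobs));  // blocks until non-empty
        int job = jobs_.back();
        jobs_.pop_back();
        mu_.Unlock();
        return job;
      }

     private:
      // Pure function of jobs_; invoked with mu_ held in at least read mode.
      bool HasJobs() const ABSL_SHARED_LOCKS_REQUIRED(mu_) { return !jobs_.empty(); }

      absl::Mutex mu_;
      std::vector<int> jobs_ ABSL_GUARDED_BY(mu_);
    };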
+ // ---------------------------------------------------------------------------
+ // Mutex Variants with Timeouts/Deadlines
+ // ---------------------------------------------------------------------------
+
+ // Mutex::AwaitWithTimeout()
+ // Mutex::AwaitWithDeadline()
+ //
// Unlocks this `Mutex` and blocks until simultaneously:
- // - either `cond` is true or the {timeout has expired, deadline has passed}
- // and
- // - this `Mutex` can be reacquired,
- // then reacquire this `Mutex` in the same mode in which it was previously
- // held, returning `true` iff `cond` is `true` on return.
- //
+ // - either `cond` is true or the {timeout has expired, deadline has passed}
+ // and
+ // - this `Mutex` can be reacquired,
+ // then reacquire this `Mutex` in the same mode in which it was previously
+ // held, returning `true` iff `cond` is `true` on return.
+ //
// If the condition is initially `true`, the implementation *may* skip the
// release/re-acquire step and return immediately.
//
- // Deadlines in the past are equivalent to an immediate deadline.
- // Negative timeouts are equivalent to a zero timeout.
- //
- // This method requires that this thread holds this `Mutex` in some mode.
- bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
-
- bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
-
- // Mutex::LockWhenWithTimeout()
- // Mutex::ReaderLockWhenWithTimeout()
- // Mutex::WriterLockWhenWithTimeout()
- //
- // Blocks until simultaneously both:
- // - either `cond` is `true` or the timeout has expired, and
- // - this `Mutex` can be acquired,
- // then atomically acquires this `Mutex`, returning `true` iff `cond` is
- // `true` on return.
- //
- // Negative timeouts are equivalent to a zero timeout.
- bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
- ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
- ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
- ABSL_EXCLUSIVE_LOCK_FUNCTION() {
- return this->LockWhenWithTimeout(cond, timeout);
- }
-
- // Mutex::LockWhenWithDeadline()
- // Mutex::ReaderLockWhenWithDeadline()
- // Mutex::WriterLockWhenWithDeadline()
- //
- // Blocks until simultaneously both:
- // - either `cond` is `true` or the deadline has been passed, and
- // - this `Mutex` can be acquired,
- // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
- // on return.
- //
- // Deadlines in the past are equivalent to an immediate deadline.
- bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
- ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
- ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
- ABSL_EXCLUSIVE_LOCK_FUNCTION() {
- return this->LockWhenWithDeadline(cond, deadline);
- }
-
- // ---------------------------------------------------------------------------
- // Debug Support: Invariant Checking, Deadlock Detection, Logging.
- // ---------------------------------------------------------------------------
-
- // Mutex::EnableInvariantDebugging()
- //
- // If `invariant`!=null and if invariant debugging has been enabled globally,
- // cause `(*invariant)(arg)` to be called at moments when the invariant for
- // this `Mutex` should hold (for example: just after acquire, just before
- // release).
- //
- // The routine `invariant` should have no side-effects since it is not
- // guaranteed how many times it will be called; it should check the invariant
- // and crash if it does not hold. Enabling global invariant debugging may
- // substantially reduce `Mutex` performance; it should be set only for
- // non-production runs. Optimization options may also disable invariant
- // checks.
- void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
-
- // Mutex::EnableDebugLog()
- //
- // Cause all subsequent uses of this `Mutex` to be logged via
- // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
- // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
- //
- // Note: This method substantially reduces `Mutex` performance.
- void EnableDebugLog(const char *name);
-
- // Deadlock detection
-
- // Mutex::ForgetDeadlockInfo()
- //
- // Forget any deadlock-detection information previously gathered
- // about this `Mutex`. Call this method in debug mode when the lock ordering
- // of a `Mutex` changes.
- void ForgetDeadlockInfo();
-
- // Mutex::AssertNotHeld()
- //
- // Return immediately if this thread does not hold this `Mutex` in any
- // mode; otherwise, may report an error (typically by crashing with a
- // diagnostic), or may return immediately.
- //
- // Currently this check is performed only if all of:
- // - in debug mode
- // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
- // - number of locks concurrently held by this thread is not large.
- // are true.
- void AssertNotHeld() const;
-
- // Special cases.
-
- // A `MuHow` is a constant that indicates how a lock should be acquired.
- // Internal implementation detail. Clients should ignore.
- typedef const struct MuHowS *MuHow;
-
- // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
- //
- // Causes the `Mutex` implementation to prepare itself for re-entry caused by
- // future use of `Mutex` within a fatal signal handler. This method is
- // intended for use only for last-ditch attempts to log crash information.
- // It does not guarantee that attempts to use Mutexes within the handler will
- // not deadlock; it merely makes other faults less likely.
- //
- // WARNING: This routine must be invoked from a signal handler, and the
- // signal handler must either loop forever or terminate the process.
- // Attempts to return from (or `longjmp` out of) the signal handler once this
- // call has been made may cause arbitrary program behaviour including
- // crashes and deadlocks.
- static void InternalAttemptToUseMutexInFatalSignalHandler();
-
- private:
- std::atomic<intptr_t> mu_; // The Mutex state.
-
- // Post()/Wait() versus associated PerThreadSem; in class for required
- // friendship with PerThreadSem.
+ // Deadlines in the past are equivalent to an immediate deadline.
+ // Negative timeouts are equivalent to a zero timeout.
+ //
+ // This method requires that this thread holds this `Mutex` in some mode.
+ bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+
+ bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
+
+ // Mutex::LockWhenWithTimeout()
+ // Mutex::ReaderLockWhenWithTimeout()
+ // Mutex::WriterLockWhenWithTimeout()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the timeout has expired, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this `Mutex`, returning `true` iff `cond` is
+ // `true` on return.
+ //
+ // Negative timeouts are equivalent to a zero timeout.
+ bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ ABSL_SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithTimeout(cond, timeout);
+ }
+
+ // Mutex::LockWhenWithDeadline()
+ // Mutex::ReaderLockWhenWithDeadline()
+ // Mutex::WriterLockWhenWithDeadline()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the deadline has been passed, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
+ // on return.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ ABSL_SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithDeadline(cond, deadline);
+ }
+
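A minimal sketch of the timeout/deadline variants above, assuming a `bool done` guarded by `mu` (all names are illustrative):

    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    absl::Mutex mu;
    bool done = false;  // guarded by mu

    // Returns true if `done` became true within five seconds, false on timeout.
    bool WaitBriefly() {
      absl::MutexLock lock(&mu);
      return mu.AwaitWithTimeout(absl::Condition(&done), absl::Seconds(5));
    }

    // Acquires mu either when `done` is true or when the deadline passes;
    // the return value says which. mu is held in both cases.
    bool LockByDeadline(absl::Time deadline) {
      bool ok = mu.LockWhenWithDeadline(absl::Condition(&done), deadline);
      // ... use the protected state ...
      mu.Unlock();
      return ok;
    }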
+ // ---------------------------------------------------------------------------
+ // Debug Support: Invariant Checking, Deadlock Detection, Logging.
+ // ---------------------------------------------------------------------------
+
+ // Mutex::EnableInvariantDebugging()
+ //
+ // If `invariant`!=null and if invariant debugging has been enabled globally,
+ // cause `(*invariant)(arg)` to be called at moments when the invariant for
+ // this `Mutex` should hold (for example: just after acquire, just before
+ // release).
+ //
+ // The routine `invariant` should have no side-effects since it is not
+ // guaranteed how many times it will be called; it should check the invariant
+ // and crash if it does not hold. Enabling global invariant debugging may
+ // substantially reduce `Mutex` performance; it should be set only for
+ // non-production runs. Optimization options may also disable invariant
+ // checks.
+ void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+
+ // Mutex::EnableDebugLog()
+ //
+ // Cause all subsequent uses of this `Mutex` to be logged via
+ // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
+ // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
+ //
+ // Note: This method substantially reduces `Mutex` performance.
+ void EnableDebugLog(const char *name);
+
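A small sketch of the debugging hooks above, assuming an `int balance` guarded by `mu` whose invariant is non-negativity (all names are illustrative; the invariant check fires only when invariant debugging has also been enabled globally, as noted above):

    #include <cassert>
    #include "absl/synchronization/mutex.h"

    static void CheckBalance(void *arg) {
      assert(*static_cast<int *>(arg) >= 0);  // crash if the invariant is violated
    }

    absl::Mutex mu;
    int balance = 0;  // guarded by mu; invariant: balance >= 0

    void EnableMutexDebugging() {
      mu.EnableInvariantDebugging(CheckBalance, &balance);  // checked around lock/unlock
      mu.EnableDebugLog("balance_mu");  // log all subsequent uses of this Mutex
    }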
+ // Deadlock detection
+
+ // Mutex::ForgetDeadlockInfo()
+ //
+ // Forget any deadlock-detection information previously gathered
+ // about this `Mutex`. Call this method in debug mode when the lock ordering
+ // of a `Mutex` changes.
+ void ForgetDeadlockInfo();
+
+ // Mutex::AssertNotHeld()
+ //
+ // Return immediately if this thread does not hold this `Mutex` in any
+ // mode; otherwise, may report an error (typically by crashing with a
+ // diagnostic), or may return immediately.
+ //
+ // Currently this check is performed only if all of:
+ // - in debug mode
+ // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
+ // - number of locks concurrently held by this thread is not large.
+ // are true.
+ void AssertNotHeld() const;
+
+ // Special cases.
+
+ // A `MuHow` is a constant that indicates how a lock should be acquired.
+ // Internal implementation detail. Clients should ignore.
+ typedef const struct MuHowS *MuHow;
+
+ // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
+ //
+ // Causes the `Mutex` implementation to prepare itself for re-entry caused by
+ // future use of `Mutex` within a fatal signal handler. This method is
+ // intended for use only for last-ditch attempts to log crash information.
+ // It does not guarantee that attempts to use Mutexes within the handler will
+ // not deadlock; it merely makes other faults less likely.
+ //
+ // WARNING: This routine must be invoked from a signal handler, and the
+ // signal handler must either loop forever or terminate the process.
+ // Attempts to return from (or `longjmp` out of) the signal handler once this
+ // call has been made may cause arbitrary program behaviour including
+ // crashes and deadlocks.
+ static void InternalAttemptToUseMutexInFatalSignalHandler();
+
+ private:
+ std::atomic<intptr_t> mu_; // The Mutex state.
+
+ // Post()/Wait() versus associated PerThreadSem; in class for required
+ // friendship with PerThreadSem.
static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
synchronization_internal::KernelTimeout t);
-
- // slow path acquire
- void LockSlowLoop(SynchWaitParams *waitp, int flags);
- // wrappers around LockSlowLoop()
- bool LockSlowWithDeadline(MuHow how, const Condition *cond,
- synchronization_internal::KernelTimeout t,
- int flags);
- void LockSlow(MuHow how, const Condition *cond,
- int flags) ABSL_ATTRIBUTE_COLD;
- // slow path release
- void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
- // Common code between Await() and AwaitWithTimeout/Deadline()
- bool AwaitCommon(const Condition &cond,
- synchronization_internal::KernelTimeout t);
- // Attempt to remove thread s from queue.
- void TryRemove(base_internal::PerThreadSynch *s);
- // Block a thread on mutex.
- void Block(base_internal::PerThreadSynch *s);
- // Wake a thread; return successor.
- base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
-
- friend class CondVar; // for access to Trans()/Fer().
- void Trans(MuHow how); // used for CondVar->Mutex transfer
- void Fer(
- base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
-
- // Catch the error of writing Mutex when intending MutexLock.
- Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
-
- Mutex(const Mutex&) = delete;
- Mutex& operator=(const Mutex&) = delete;
-};
-
-// -----------------------------------------------------------------------------
-// Mutex RAII Wrappers
-// -----------------------------------------------------------------------------
-
-// MutexLock
-//
-// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
-// RAII.
-//
-// Example:
-//
-// class Foo {
+
+ // slow path acquire
+ void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ // wrappers around LockSlowLoop()
+ bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ synchronization_internal::KernelTimeout t,
+ int flags);
+ void LockSlow(MuHow how, const Condition *cond,
+ int flags) ABSL_ATTRIBUTE_COLD;
+ // slow path release
+ void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+ // Common code between Await() and AwaitWithTimeout/Deadline()
+ bool AwaitCommon(const Condition &cond,
+ synchronization_internal::KernelTimeout t);
+ // Attempt to remove thread s from queue.
+ void TryRemove(base_internal::PerThreadSynch *s);
+ // Block a thread on mutex.
+ void Block(base_internal::PerThreadSynch *s);
+ // Wake a thread; return successor.
+ base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+
+ friend class CondVar; // for access to Trans()/Fer().
+ void Trans(MuHow how); // used for CondVar->Mutex transfer
+ void Fer(
+ base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Mutex RAII Wrappers
+// -----------------------------------------------------------------------------
+
+// MutexLock
+//
+// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
+// RAII.
+//
+// Example:
+//
+// class Foo {
// public:
-// Foo::Bar* Baz() {
+// Foo::Bar* Baz() {
// MutexLock lock(&mu_);
-// ...
-// return bar;
-// }
-//
-// private:
+// ...
+// return bar;
+// }
+//
+// private:
// Mutex mu_;
-// };
-class ABSL_SCOPED_LOCKABLE MutexLock {
- public:
+// };
+class ABSL_SCOPED_LOCKABLE MutexLock {
+ public:
// Constructors
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
- explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
- this->mu_->Lock();
- }
-
+ explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ this->mu_->Lock();
+ }
+
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed.
@@ -535,113 +535,113 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
this->mu_->LockWhen(cond);
}
- MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
- MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
- MutexLock& operator=(const MutexLock&) = delete;
- MutexLock& operator=(MutexLock&&) = delete;
-
- ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
-
- private:
- Mutex *const mu_;
-};
-
-// ReaderMutexLock
-//
-// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
-// releases a shared lock on a `Mutex` via RAII.
-class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
- public:
- explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
- mu->ReaderLock();
- }
-
+ MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
+ MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
+ MutexLock& operator=(const MutexLock&) = delete;
+ MutexLock& operator=(MutexLock&&) = delete;
+
+ ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+
+ private:
+ Mutex *const mu_;
+};
+
+// ReaderMutexLock
+//
+// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a shared lock on a `Mutex` via RAII.
+class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
+ public:
+ explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ mu->ReaderLock();
+ }
+
explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLockWhen(cond);
}
- ReaderMutexLock(const ReaderMutexLock&) = delete;
- ReaderMutexLock(ReaderMutexLock&&) = delete;
- ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
- ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
-
- ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
-
- private:
- Mutex *const mu_;
-};
-
-// WriterMutexLock
-//
-// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
-// releases a write (exclusive) lock on a `Mutex` via RAII.
-class ABSL_SCOPED_LOCKABLE WriterMutexLock {
- public:
- explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- mu->WriterLock();
- }
-
+ ReaderMutexLock(const ReaderMutexLock&) = delete;
+ ReaderMutexLock(ReaderMutexLock&&) = delete;
+ ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
+ ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
+
+ ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
+
+ private:
+ Mutex *const mu_;
+};
+
+// WriterMutexLock
+//
+// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a write (exclusive) lock on a `Mutex` via RAII.
+class ABSL_SCOPED_LOCKABLE WriterMutexLock {
+ public:
+ explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLock();
+ }
+
explicit WriterMutexLock(Mutex *mu, const Condition &cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLockWhen(cond);
}
- WriterMutexLock(const WriterMutexLock&) = delete;
- WriterMutexLock(WriterMutexLock&&) = delete;
- WriterMutexLock& operator=(const WriterMutexLock&) = delete;
- WriterMutexLock& operator=(WriterMutexLock&&) = delete;
-
- ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
-
- private:
- Mutex *const mu_;
-};
-
-// -----------------------------------------------------------------------------
-// Condition
-// -----------------------------------------------------------------------------
-//
-// As noted above, `Mutex` contains a number of member functions which take a
-// `Condition` as an argument; clients can wait for conditions to become `true`
-// before attempting to acquire the mutex. These sections are known as
-// "condition critical" sections. To use a `Condition`, you simply need to
-// construct it, and use within an appropriate `Mutex` member function;
-// everything else in the `Condition` class is an implementation detail.
-//
-// A `Condition` is specified as a function pointer which returns a boolean.
-// `Condition` functions should be pure functions -- their results should depend
-// only on passed arguments, should not consult any external state (such as
-// clocks), and should have no side-effects, aside from debug logging. Any
-// objects that the function may access should be limited to those which are
-// constant while the mutex is blocked on the condition (e.g. a stack variable),
-// or objects of state protected explicitly by the mutex.
-//
-// No matter which construction is used for `Condition`, the underlying
-// function pointer / functor / callable must not throw any
-// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
-// the face of a throwing `Condition`. (When Abseil is allowed to depend
-// on C++17, these function pointers will be explicitly marked
-// `noexcept`; until then this requirement cannot be enforced in the
-// type system.)
-//
+ WriterMutexLock(const WriterMutexLock&) = delete;
+ WriterMutexLock(WriterMutexLock&&) = delete;
+ WriterMutexLock& operator=(const WriterMutexLock&) = delete;
+ WriterMutexLock& operator=(WriterMutexLock&&) = delete;
+
+ ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
+
+ private:
+ Mutex *const mu_;
+};
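For reference, a minimal usage sketch of the three RAII wrappers above. The Counter class, its members, and the chosen lock kinds are illustrative assumptions, not part of this header:

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// Illustrative class, not taken from the diff above.
class Counter {
 public:
  void Increment() {
    absl::WriterMutexLock lock(&mu_);  // exclusive lock for the write
    ++value_;
  }

  int Get() const {
    absl::ReaderMutexLock lock(&mu_);  // a shared lock suffices for the read
    return value_;
  }

 private:
  mutable absl::Mutex mu_;
  int value_ ABSL_GUARDED_BY(mu_) = 0;
};

`MutexLock` would behave like `WriterMutexLock` here; both acquire the mutex exclusively.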
+
+// -----------------------------------------------------------------------------
+// Condition
+// -----------------------------------------------------------------------------
+//
+// As noted above, `Mutex` contains a number of member functions which take a
+// `Condition` as an argument; clients can wait for conditions to become `true`
+// before attempting to acquire the mutex. These sections are known as
+// "condition critical" sections. To use a `Condition`, you simply need to
+// construct it, and use within an appropriate `Mutex` member function;
+// everything else in the `Condition` class is an implementation detail.
+//
+// A `Condition` is specified as a function pointer which returns a boolean.
+// `Condition` functions should be pure functions -- their results should depend
+// only on passed arguments, should not consult any external state (such as
+// clocks), and should have no side-effects, aside from debug logging. Any
+// objects that the function may access should be limited to those which are
+// constant while the mutex is blocked on the condition (e.g. a stack variable),
+// or objects of state protected explicitly by the mutex.
+//
+// No matter which construction is used for `Condition`, the underlying
+// function pointer / functor / callable must not throw any
+// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
+// the face of a throwing `Condition`. (When Abseil is allowed to depend
+// on C++17, these function pointers will be explicitly marked
+// `noexcept`; until then this requirement cannot be enforced in the
+// type system.)
+//
// Note: to use a `Condition`, you need only construct it and pass it to a
// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
// constructor of one of the scope guard classes.
-//
+//
// Example using LockWhen/Unlock:
-//
-//   // assume count_ is not an internal reference count
-// int count_ ABSL_GUARDED_BY(mu_);
+//
+//   // assume count_ is not an internal reference count
+// int count_ ABSL_GUARDED_BY(mu_);
// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
-//
+//
// mu_.LockWhen(count_is_zero);
// // ...
// mu_.Unlock();
-//
+//
// Example using a scope guard:
//
// {
@@ -649,236 +649,236 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
// // ...
// }
//
-// When multiple threads are waiting on exactly the same condition, make sure
-// that they are constructed with the same parameters (same pointer to function
-// + arg, or same pointer to object + method), so that the mutex implementation
-// can avoid redundantly evaluating the same condition for each thread.
-class Condition {
- public:
- // A Condition that returns the result of "(*func)(arg)"
- Condition(bool (*func)(void *), void *arg);
-
- // Templated version for people who are averse to casts.
- //
- // To use a lambda, prepend it with unary plus, which converts the lambda
- // into a function pointer:
- // Condition(+[](T* t) { return ...; }, arg).
- //
- // Note: lambdas in this case must contain no bound variables.
- //
- // See class comment for performance advice.
- template<typename T>
- Condition(bool (*func)(T *), T *arg);
-
- // Templated version for invoking a method that returns a `bool`.
- //
- // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
- // `object->Method()`.
- //
- // Implementation Note: `absl::internal::identity` is used to allow methods to
- // come from base classes. A simpler signature like
- // `Condition(T*, bool (T::*)())` does not suffice.
- template<typename T>
- Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
-
- // Same as above, for const members
- template<typename T>
- Condition(const T *object,
- bool (absl::internal::identity<T>::type::* method)() const);
-
- // A Condition that returns the value of `*cond`
- explicit Condition(const bool *cond);
-
- // Templated version for invoking a functor that returns a `bool`.
- // This approach accepts pointers to non-mutable lambdas, `std::function`,
-  // the result of `std::bind` and user-defined functors that define
- // `bool F::operator()() const`.
- //
- // Example:
- //
- // auto reached = [this, current]() {
- // mu_.AssertReaderHeld(); // For annotalysis.
- // return processed_ >= current;
- // };
- // mu_.Await(Condition(&reached));
+// When multiple threads are waiting on exactly the same condition, make sure
+// that they are constructed with the same parameters (same pointer to function
+// + arg, or same pointer to object + method), so that the mutex implementation
+// can avoid redundantly evaluating the same condition for each thread.
+class Condition {
+ public:
+ // A Condition that returns the result of "(*func)(arg)"
+ Condition(bool (*func)(void *), void *arg);
+
+ // Templated version for people who are averse to casts.
+ //
+ // To use a lambda, prepend it with unary plus, which converts the lambda
+ // into a function pointer:
+ // Condition(+[](T* t) { return ...; }, arg).
+ //
+ // Note: lambdas in this case must contain no bound variables.
+ //
+ // See class comment for performance advice.
+ template<typename T>
+ Condition(bool (*func)(T *), T *arg);
+
+ // Templated version for invoking a method that returns a `bool`.
+ //
+ // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
+ // `object->Method()`.
+ //
+ // Implementation Note: `absl::internal::identity` is used to allow methods to
+ // come from base classes. A simpler signature like
+ // `Condition(T*, bool (T::*)())` does not suffice.
+ template<typename T>
+ Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+
+ // Same as above, for const members
+ template<typename T>
+ Condition(const T *object,
+ bool (absl::internal::identity<T>::type::* method)() const);
+
+ // A Condition that returns the value of `*cond`
+ explicit Condition(const bool *cond);
+
+ // Templated version for invoking a functor that returns a `bool`.
+ // This approach accepts pointers to non-mutable lambdas, `std::function`,
+  // the result of `std::bind` and user-defined functors that define
+ // `bool F::operator()() const`.
+ //
+ // Example:
+ //
+ // auto reached = [this, current]() {
+ // mu_.AssertReaderHeld(); // For annotalysis.
+ // return processed_ >= current;
+ // };
+ // mu_.Await(Condition(&reached));
//
// NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
  // the lambda, as it may be called while the mutex is being unlocked from a
  // scope holding only a reader lock, in which case the assertion would fail
  // and crash the binary.
-
- // See class comment for performance advice. In particular, if there
- // might be more than one waiter for the same condition, make sure
- // that all waiters construct the condition with the same pointers.
-
- // Implementation note: The second template parameter ensures that this
- // constructor doesn't participate in overload resolution if T doesn't have
- // `bool operator() const`.
- template <typename T, typename E = decltype(
- static_cast<bool (T::*)() const>(&T::operator()))>
- explicit Condition(const T *obj)
- : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
-
- // A Condition that always returns `true`.
- static const Condition kTrue;
-
- // Evaluates the condition.
- bool Eval() const;
-
- // Returns `true` if the two conditions are guaranteed to return the same
- // value if evaluated at the same time, `false` if the evaluation *may* return
- // different results.
- //
- // Two `Condition` values are guaranteed equal if both their `func` and `arg`
- // components are the same. A null pointer is equivalent to a `true`
- // condition.
- static bool GuaranteedEqual(const Condition *a, const Condition *b);
-
- private:
- typedef bool (*InternalFunctionType)(void * arg);
- typedef bool (Condition::*InternalMethodType)();
- typedef bool (*InternalMethodCallerType)(void * arg,
- InternalMethodType internal_method);
-
- bool (*eval_)(const Condition*); // Actual evaluator
- InternalFunctionType function_; // function taking pointer returning bool
- InternalMethodType method_; // method returning bool
- void *arg_; // arg of function_ or object of method_
-
- Condition(); // null constructor used only to create kTrue
-
- // Various functions eval_ can point to:
- static bool CallVoidPtrFunction(const Condition*);
- template <typename T> static bool CastAndCallFunction(const Condition* c);
- template <typename T> static bool CastAndCallMethod(const Condition* c);
-};
-
-// -----------------------------------------------------------------------------
-// CondVar
-// -----------------------------------------------------------------------------
-//
-// A condition variable, reflecting state evaluated separately outside of the
-// `Mutex` object, which can be signaled to wake callers.
-// This class is not normally needed; use `Mutex` member functions such as
-// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
-// with many threads and many conditions, `CondVar` may be faster.
-//
-// The implementation may deliver signals to any condition variable at
-// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
-// result, upon being awoken, you must check the logical condition you have
-// been waiting upon.
-//
-// Examples:
-//
-// Usage for a thread waiting for some condition C protected by mutex mu:
-// mu.Lock();
-// while (!C) { cv->Wait(&mu); } // releases and reacquires mu
-// // C holds; process data
-// mu.Unlock();
-//
-// Usage to wake the waiting thread is:
-// mu.Lock();
+
+ // See class comment for performance advice. In particular, if there
+ // might be more than one waiter for the same condition, make sure
+ // that all waiters construct the condition with the same pointers.
+
+ // Implementation note: The second template parameter ensures that this
+ // constructor doesn't participate in overload resolution if T doesn't have
+ // `bool operator() const`.
+ template <typename T, typename E = decltype(
+ static_cast<bool (T::*)() const>(&T::operator()))>
+ explicit Condition(const T *obj)
+ : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
+
+ // A Condition that always returns `true`.
+ static const Condition kTrue;
+
+ // Evaluates the condition.
+ bool Eval() const;
+
+ // Returns `true` if the two conditions are guaranteed to return the same
+ // value if evaluated at the same time, `false` if the evaluation *may* return
+ // different results.
+ //
+ // Two `Condition` values are guaranteed equal if both their `func` and `arg`
+ // components are the same. A null pointer is equivalent to a `true`
+ // condition.
+ static bool GuaranteedEqual(const Condition *a, const Condition *b);
+
+ private:
+ typedef bool (*InternalFunctionType)(void * arg);
+ typedef bool (Condition::*InternalMethodType)();
+ typedef bool (*InternalMethodCallerType)(void * arg,
+ InternalMethodType internal_method);
+
+ bool (*eval_)(const Condition*); // Actual evaluator
+ InternalFunctionType function_; // function taking pointer returning bool
+ InternalMethodType method_; // method returning bool
+ void *arg_; // arg of function_ or object of method_
+
+ Condition(); // null constructor used only to create kTrue
+
+ // Various functions eval_ can point to:
+ static bool CallVoidPtrFunction(const Condition*);
+ template <typename T> static bool CastAndCallFunction(const Condition* c);
+ template <typename T> static bool CastAndCallMethod(const Condition* c);
+};
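A short sketch of the capture-free-lambda constructor described above; the mu and pending globals are assumed application state, not symbols from this header:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int pending = 3;  // assumed counter, protected by mu and decremented elsewhere

void WaitForDrain() {
  mu.Lock();
  // Unary plus converts the capture-free lambda into a bool (*)(int *).
  mu.Await(absl::Condition(+[](int *p) { return *p == 0; }, &pending));
  // pending == 0 here, and mu is still held.
  mu.Unlock();
}

Constructing every waiter's condition from the same function pointer and argument lets the implementation avoid re-evaluating it redundantly, as the class comment above notes.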
+
+// -----------------------------------------------------------------------------
+// CondVar
+// -----------------------------------------------------------------------------
+//
+// A condition variable, reflecting state evaluated separately outside of the
+// `Mutex` object, which can be signaled to wake callers.
+// This class is not normally needed; use `Mutex` member functions such as
+// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
+// with many threads and many conditions, `CondVar` may be faster.
+//
+// The implementation may deliver signals to any condition variable at
+// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
+// result, upon being awoken, you must check the logical condition you have
+// been waiting upon.
+//
+// Examples:
+//
+// Usage for a thread waiting for some condition C protected by mutex mu:
+// mu.Lock();
+// while (!C) { cv->Wait(&mu); } // releases and reacquires mu
+// // C holds; process data
+// mu.Unlock();
+//
+// Usage to wake the waiting thread is:
+// mu.Lock();
// // process data, possibly establishing C
// if (C) { cv->Signal(); }
// mu.Unlock();
-//
-// If C may be useful to more than one waiter, use `SignalAll()` instead of
-// `Signal()`.
-//
-// With this implementation it is efficient to use `Signal()/SignalAll()` inside
-// the locked region; this usage can make reasoning about your program easier.
-//
-class CondVar {
- public:
+//
+// If C may be useful to more than one waiter, use `SignalAll()` instead of
+// `Signal()`.
+//
+// With this implementation it is efficient to use `Signal()/SignalAll()` inside
+// the locked region; this usage can make reasoning about your program easier.
+//
+class CondVar {
+ public:
  // A `CondVar` allocated on the heap or on the stack can use this
// constructor.
- CondVar();
- ~CondVar();
-
- // CondVar::Wait()
- //
- // Atomically releases a `Mutex` and blocks on this condition variable.
- // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
- // spurious wakeup), then reacquires the `Mutex` and returns.
- //
- // Requires and ensures that the current thread holds the `Mutex`.
- void Wait(Mutex *mu);
-
- // CondVar::WaitWithTimeout()
- //
- // Atomically releases a `Mutex` and blocks on this condition variable.
- // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
- // spurious wakeup), or until the timeout has expired, then reacquires
- // the `Mutex` and returns.
- //
- // Returns true if the timeout has expired without this `CondVar`
- // being signalled in any manner. If both the timeout has expired
- // and this `CondVar` has been signalled, the implementation is free
- // to return `true` or `false`.
- //
- // Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
-
- // CondVar::WaitWithDeadline()
- //
- // Atomically releases a `Mutex` and blocks on this condition variable.
- // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
- // spurious wakeup), or until the deadline has passed, then reacquires
- // the `Mutex` and returns.
- //
- // Deadlines in the past are equivalent to an immediate deadline.
- //
- // Returns true if the deadline has passed without this `CondVar`
- // being signalled in any manner. If both the deadline has passed
- // and this `CondVar` has been signalled, the implementation is free
- // to return `true` or `false`.
- //
- // Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
-
- // CondVar::Signal()
- //
- // Signal this `CondVar`; wake at least one waiter if one exists.
- void Signal();
-
- // CondVar::SignalAll()
- //
- // Signal this `CondVar`; wake all waiters.
- void SignalAll();
-
- // CondVar::EnableDebugLog()
- //
- // Causes all subsequent uses of this `CondVar` to be logged via
- // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
- // Note: this method substantially reduces `CondVar` performance.
- void EnableDebugLog(const char *name);
-
- private:
- bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
- void Remove(base_internal::PerThreadSynch *s);
- void Wakeup(base_internal::PerThreadSynch *w);
- std::atomic<intptr_t> cv_; // Condition variable state.
- CondVar(const CondVar&) = delete;
- CondVar& operator=(const CondVar&) = delete;
-};
-
-
-// Variants of MutexLock.
-//
-// If you find yourself using one of these, consider instead using
-// Mutex::Unlock() and/or if-statements for clarity.
-
-// MutexLockMaybe
-//
-// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
-class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
- public:
- explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- if (this->mu_ != nullptr) {
- this->mu_->Lock();
- }
- }
+ CondVar();
+ ~CondVar();
+
+ // CondVar::Wait()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), then reacquires the `Mutex` and returns.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ void Wait(Mutex *mu);
+
+ // CondVar::WaitWithTimeout()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), or until the timeout has expired, then reacquires
+ // the `Mutex` and returns.
+ //
+ // Returns true if the timeout has expired without this `CondVar`
+ // being signalled in any manner. If both the timeout has expired
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
+
+ // CondVar::WaitWithDeadline()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable.
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
+ // spurious wakeup), or until the deadline has passed, then reacquires
+ // the `Mutex` and returns.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ //
+ // Returns true if the deadline has passed without this `CondVar`
+ // being signalled in any manner. If both the deadline has passed
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+
+ // CondVar::Signal()
+ //
+ // Signal this `CondVar`; wake at least one waiter if one exists.
+ void Signal();
+
+ // CondVar::SignalAll()
+ //
+ // Signal this `CondVar`; wake all waiters.
+ void SignalAll();
+
+ // CondVar::EnableDebugLog()
+ //
+ // Causes all subsequent uses of this `CondVar` to be logged via
+ // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
+ // Note: this method substantially reduces `CondVar` performance.
+ void EnableDebugLog(const char *name);
+
+ private:
+ bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch *s);
+ void Wakeup(base_internal::PerThreadSynch *w);
+ std::atomic<intptr_t> cv_; // Condition variable state.
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+};
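The waiter/waker pattern from the `CondVar` comment, spelled out as a sketch; the ready flag and both functions are assumed for illustration:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
absl::CondVar cv;
bool ready = false;  // the logical condition C, protected by mu

void Waiter() {
  mu.Lock();
  while (!ready) {   // re-check C: wakeups may be spurious
    cv.Wait(&mu);    // releases mu while blocked, reacquires before returning
  }
  // C holds here; process data.
  mu.Unlock();
}

void Waker() {
  mu.Lock();
  ready = true;      // establish C
  cv.Signal();       // wake one waiter; use SignalAll() if C helps several
  mu.Unlock();
}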
+
+
+// Variants of MutexLock.
+//
+// If you find yourself using one of these, consider instead using
+// Mutex::Unlock() and/or if-statements for clarity.
+
+// MutexLockMaybe
+//
+// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
+class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
+ public:
+ explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->Lock();
+ }
+ }
explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
@@ -888,28 +888,28 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
}
}
- ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
- }
-
- private:
- Mutex *const mu_;
- MutexLockMaybe(const MutexLockMaybe&) = delete;
- MutexLockMaybe(MutexLockMaybe&&) = delete;
- MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
- MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
-};
-
-// ReleasableMutexLock
-//
-// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
-// mutex before destruction. `Release()` may be called at most once.
-class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
- public:
- explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- this->mu_->Lock();
- }
+ ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+
+ private:
+ Mutex *const mu_;
+ MutexLockMaybe(const MutexLockMaybe&) = delete;
+ MutexLockMaybe(MutexLockMaybe&&) = delete;
+ MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
+ MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
+};
+
+// ReleasableMutexLock
+//
+// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
+// mutex before destruction. `Release()` may be called at most once.
+class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
+ public:
+ explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->Lock();
+ }
explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
@@ -917,166 +917,166 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
this->mu_->LockWhen(cond);
}
- ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
- }
-
- void Release() ABSL_UNLOCK_FUNCTION();
-
- private:
- Mutex *mu_;
- ReleasableMutexLock(const ReleasableMutexLock&) = delete;
- ReleasableMutexLock(ReleasableMutexLock&&) = delete;
- ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
- ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
-};
-
-inline Mutex::Mutex() : mu_(0) {
- ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
-}
-
-inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
-
-inline CondVar::CondVar() : cv_(0) {}
-
-// static
-template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
- typedef bool (T::*MemberType)();
- MemberType rm = reinterpret_cast<MemberType>(c->method_);
- T *x = static_cast<T *>(c->arg_);
- return (x->*rm)();
-}
-
-// static
-template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
- typedef bool (*FuncType)(T *);
- FuncType fn = reinterpret_cast<FuncType>(c->function_);
- T *x = static_cast<T *>(c->arg_);
- return (*fn)(x);
-}
-
-template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
- : eval_(&CastAndCallFunction<T>),
- function_(reinterpret_cast<InternalFunctionType>(func)),
- method_(nullptr),
- arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
-
-template <typename T>
-inline Condition::Condition(T *object,
- bool (absl::internal::identity<T>::type::*method)())
- : eval_(&CastAndCallMethod<T>),
- function_(nullptr),
- method_(reinterpret_cast<InternalMethodType>(method)),
- arg_(object) {}
-
-template <typename T>
-inline Condition::Condition(const T *object,
- bool (absl::internal::identity<T>::type::*method)()
- const)
- : eval_(&CastAndCallMethod<T>),
- function_(nullptr),
- method_(reinterpret_cast<InternalMethodType>(method)),
- arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
-
-// Register a hook for profiling support.
-//
-// The function pointer registered here will be called whenever a mutex is
-// contended. The callback is given the absl/base/cycleclock.h timestamp when
-// waiting began.
-//
-// Calls to this function do not race or block, but there is no ordering
-// guaranteed between calls to this function and calls to the provided hook.
-// In particular, the previously registered hook may still be called for some
-// time after this function returns.
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
-
-// Register a hook for Mutex tracing.
-//
-// The function pointer registered here will be called whenever a mutex is
-// contended. The callback is given an opaque handle to the contended mutex,
-// an event name, and the number of wait cycles (as measured by
-// //absl/base/internal/cycleclock.h, and which may not be real
-// "cycle" counts.)
-//
-// The only event name currently sent is "slow release".
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+
+ void Release() ABSL_UNLOCK_FUNCTION();
+
+ private:
+ Mutex *mu_;
+ ReleasableMutexLock(const ReleasableMutexLock&) = delete;
+ ReleasableMutexLock(ReleasableMutexLock&&) = delete;
+ ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
+ ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
+};
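A sketch of the two `MutexLock` variants above, with hypothetical call sites; in the first function mu may legitimately be null, and in the second `Release()` is called exactly once:

#include "absl/synchronization/mutex.h"

void MaybeLocked(absl::Mutex *mu) {
  absl::MutexLockMaybe lock(mu);  // no-op when mu == nullptr
  // ... touch state that is only guarded when mu is non-null ...
}

void LockThenHandOff(absl::Mutex *mu) {
  absl::ReleasableMutexLock lock(mu);
  // ... work that needs the lock ...
  lock.Release();  // unlocks now; the destructor then does nothing
  // ... work that must not run under the lock ...
}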
+
+inline Mutex::Mutex() : mu_(0) {
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
+
+inline CondVar::CondVar() : cv_(0) {}
+
+// static
+template <typename T>
+bool Condition::CastAndCallMethod(const Condition *c) {
+ typedef bool (T::*MemberType)();
+ MemberType rm = reinterpret_cast<MemberType>(c->method_);
+ T *x = static_cast<T *>(c->arg_);
+ return (x->*rm)();
+}
+
+// static
+template <typename T>
+bool Condition::CastAndCallFunction(const Condition *c) {
+ typedef bool (*FuncType)(T *);
+ FuncType fn = reinterpret_cast<FuncType>(c->function_);
+ T *x = static_cast<T *>(c->arg_);
+ return (*fn)(x);
+}
+
+template <typename T>
+inline Condition::Condition(bool (*func)(T *), T *arg)
+ : eval_(&CastAndCallFunction<T>),
+ function_(reinterpret_cast<InternalFunctionType>(func)),
+ method_(nullptr),
+ arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
+
+template <typename T>
+inline Condition::Condition(T *object,
+ bool (absl::internal::identity<T>::type::*method)())
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(object) {}
+
+template <typename T>
+inline Condition::Condition(const T *object,
+ bool (absl::internal::identity<T>::type::*method)()
+ const)
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given the absl/base/cycleclock.h timestamp when
+// waiting began.
+//
+// Calls to this function do not race or block, but there is no ordering
+// guaranteed between calls to this function and calls to the provided hook.
+// In particular, the previously registered hook may still be called for some
+// time after this function returns.
+void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
+
+// Register a hook for Mutex tracing.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given an opaque handle to the contended mutex,
+// an event name, and the number of wait cycles (as measured by
+// //absl/base/internal/cycleclock.h, and which may not be real
+// "cycle" counts.)
+//
+// The only event name currently sent is "slow release".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
int64_t wait_cycles));
-
-// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
-// into a single interface, since they are only ever called in pairs.
-
-// Register a hook for CondVar tracing.
-//
-// The function pointer registered here will be called on various CondVar
-// events. The callback is given an opaque handle to the CondVar object and
-// a string identifying the event. This is thread-safe, but only a single
-// tracer can be registered.
-//
-// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
-// "SignalAll wakeup".
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer. This function can return
+
+// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
+// into a single interface, since they are only ever called in pairs.
+
+// Register a hook for CondVar tracing.
+//
+// The function pointer registered here will be called on various CondVar
+// events. The callback is given an opaque handle to the CondVar object and
+// a string identifying the event. This is thread-safe, but only a single
+// tracer can be registered.
+//
+// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
+// "SignalAll wakeup".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
+
+// Register a hook for symbolizing stack traces in deadlock detector reports.
+//
+// 'pc' is the program counter being symbolized, 'out' is the buffer to write
+// into, and 'out_size' is the size of the buffer. This function can return
// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out.'
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
- "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
-
-// EnableMutexInvariantDebugging()
-//
-// Enable or disable global support for Mutex invariant debugging. If enabled,
-// then invariant predicates can be registered per-Mutex for debug checking.
-// See Mutex::EnableInvariantDebugging().
-void EnableMutexInvariantDebugging(bool enabled);
-
-// When in debug mode, and when the feature has been enabled globally, the
-// implementation will keep track of lock ordering and complain (or optionally
-// crash) if a cycle is detected in the acquired-before graph.
-
-// Possible modes of operation for the deadlock detector in debug mode.
-enum class OnDeadlockCycle {
- kIgnore, // Neither report on nor attempt to track cycles in lock ordering
- kReport, // Report lock cycles to stderr when detected
- kAbort, // Report lock cycles to stderr when detected, then abort
-};
-
-// SetMutexDeadlockDetectionMode()
-//
-// Enable or disable global support for detection of potential deadlocks
-// due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
-// lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
-// will be maintained internally, and detected cycles will be reported in
-// the manner chosen here.
-void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
-
+// to 'out.'
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+//
+// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
+// ability to register a different hook for symbolizing stack traces will be
+// removed on or after 2023-05-01.
+ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
+ "on or after 2023-05-01")
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+
+// EnableMutexInvariantDebugging()
+//
+// Enable or disable global support for Mutex invariant debugging. If enabled,
+// then invariant predicates can be registered per-Mutex for debug checking.
+// See Mutex::EnableInvariantDebugging().
+void EnableMutexInvariantDebugging(bool enabled);
+
+// When in debug mode, and when the feature has been enabled globally, the
+// implementation will keep track of lock ordering and complain (or optionally
+// crash) if a cycle is detected in the acquired-before graph.
+
+// Possible modes of operation for the deadlock detector in debug mode.
+enum class OnDeadlockCycle {
+ kIgnore, // Neither report on nor attempt to track cycles in lock ordering
+ kReport, // Report lock cycles to stderr when detected
+ kAbort, // Report lock cycles to stderr when detected, then abort
+};
+
+// SetMutexDeadlockDetectionMode()
+//
+// Enable or disable global support for detection of potential deadlocks
+// due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
+// lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
+// will be maintained internally, and detected cycles will be reported in
+// the manner chosen here.
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
+
ABSL_NAMESPACE_END
-} // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker. This causes it to flag weak symbol overrides as ODR
-// violations. Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
-} // extern "C"
-
-#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
+} // extern "C"
+
+#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
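The deadlock-detection and invariant-debugging knobs declared near the end of this header are usually set once at startup; a sketch, assuming a debug build in which the detector is compiled in:

#include "absl/synchronization/mutex.h"

int main() {
  // Report lock-ordering cycles instead of ignoring them (or aborting).
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  // Allow per-Mutex invariants registered through
  // Mutex::EnableInvariantDebugging() to be checked.
  absl::EnableMutexInvariantDebugging(true);
  // ... rest of the program ...
  return 0;
}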
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc b/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
index 3d876ce600..e91b903822 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/notification.cc
@@ -1,78 +1,78 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/notification.h"
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/time.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/notification.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-void Notification::Notify() {
- MutexLock l(&this->mutex_);
-
-#ifndef NDEBUG
- if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
- ABSL_RAW_LOG(
- FATAL,
- "Notify() method called more than once for Notification object %p",
- static_cast<void *>(this));
- }
-#endif
-
- notified_yet_.store(true, std::memory_order_release);
-}
-
-Notification::~Notification() {
- // Make sure that the thread running Notify() exits before the object is
- // destructed.
- MutexLock l(&this->mutex_);
-}
-
-void Notification::WaitForNotification() const {
- if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
- this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
- &this->notified_yet_));
- this->mutex_.Unlock();
- }
-}
-
-bool Notification::WaitForNotificationWithTimeout(
- absl::Duration timeout) const {
- bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
- if (!notified) {
- notified = this->mutex_.LockWhenWithTimeout(
- Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
- this->mutex_.Unlock();
- }
- return notified;
-}
-
-bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
- bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
- if (!notified) {
- notified = this->mutex_.LockWhenWithDeadline(
- Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
- this->mutex_.Unlock();
- }
- return notified;
-}
-
+
+void Notification::Notify() {
+ MutexLock l(&this->mutex_);
+
+#ifndef NDEBUG
+ if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Notify() method called more than once for Notification object %p",
+ static_cast<void *>(this));
+ }
+#endif
+
+ notified_yet_.store(true, std::memory_order_release);
+}
+
+Notification::~Notification() {
+ // Make sure that the thread running Notify() exits before the object is
+ // destructed.
+ MutexLock l(&this->mutex_);
+}
+
+void Notification::WaitForNotification() const {
+ if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
+ this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
+ &this->notified_yet_));
+ this->mutex_.Unlock();
+ }
+}
+
+bool Notification::WaitForNotificationWithTimeout(
+ absl::Duration timeout) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithTimeout(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
+bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithDeadline(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
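The implementation above builds a `Condition` from a plain function pointer and waits with `Mutex::LockWhen*()`; the same pattern works directly in user code. A sketch with an assumed atomic flag, mirroring WaitForNotificationWithTimeout():

#include <atomic>

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

absl::Mutex mu;
std::atomic<bool> done{false};  // set elsewhere, e.g. done.store(true, std::memory_order_release)

static bool IsDone(std::atomic<bool> *flag) {
  return flag->load(std::memory_order_acquire);
}

// Returns true if done became true within the timeout.
bool WaitForDone(absl::Duration timeout) {
  bool ok = mu.LockWhenWithTimeout(absl::Condition(&IsDone, &done), timeout);
  mu.Unlock();
  return ok;
}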
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
index 7c5d8f4222..9a354ca2c0 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/notification.h
@@ -1,123 +1,123 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// notification.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines a `Notification` abstraction, which allows threads
-// to receive notification of a single occurrence of a single event.
-//
-// The `Notification` object maintains a private boolean "notified" state that
-// transitions to `true` at most once. The `Notification` class provides the
-// following primary member functions:
-//   * `HasBeenNotified()` to query its state
-// * `WaitForNotification*()` to have threads wait until the "notified" state
-// is `true`.
-// * `Notify()` to set the notification's "notified" state to `true` and
-// notify all waiting threads that the event has occurred.
-// This method may only be called once.
-//
-// Note that while `Notify()` may only be called once, it is perfectly valid to
-// call any of the `WaitForNotification*()` methods multiple times, from
-// multiple threads -- even after the notification's "notified" state has been
-// set -- in which case those methods will immediately return.
-//
-// Note that the lifetime of a `Notification` requires careful consideration;
-// it might not be safe to destroy a notification after calling `Notify()` since
-// it is still legal for other threads to call `WaitForNotification*()` methods
-// on the notification. However, observers responding to a "notified" state of
-// `true` can safely delete the notification without interfering with the call
-// to `Notify()` in the other thread.
-//
-// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any
-// action taken by X before it calls `Notify()` is visible to thread Y after:
-// * Y returns from `WaitForNotification()`, or
-// * Y receives a `true` return value from either `HasBeenNotified()` or
-// `WaitForNotificationWithTimeout()`.
-
-#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
-#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
-
-#include <atomic>
-
-#include "absl/base/macros.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/time.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// notification.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Notification` abstraction, which allows threads
+// to receive notification of a single occurrence of a single event.
+//
+// The `Notification` object maintains a private boolean "notified" state that
+// transitions to `true` at most once. The `Notification` class provides the
+// following primary member functions:
+//   * `HasBeenNotified()` to query its state
+// * `WaitForNotification*()` to have threads wait until the "notified" state
+// is `true`.
+// * `Notify()` to set the notification's "notified" state to `true` and
+// notify all waiting threads that the event has occurred.
+// This method may only be called once.
+//
+// Note that while `Notify()` may only be called once, it is perfectly valid to
+// call any of the `WaitForNotification*()` methods multiple times, from
+// multiple threads -- even after the notification's "notified" state has been
+// set -- in which case those methods will immediately return.
+//
+// Note that the lifetime of a `Notification` requires careful consideration;
+// it might not be safe to destroy a notification after calling `Notify()` since
+// it is still legal for other threads to call `WaitForNotification*()` methods
+// on the notification. However, observers responding to a "notified" state of
+// `true` can safely delete the notification without interfering with the call
+// to `Notify()` in the other thread.
+//
+// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any
+// action taken by X before it calls `Notify()` is visible to thread Y after:
+// * Y returns from `WaitForNotification()`, or
+// * Y receives a `true` return value from either `HasBeenNotified()` or
+// `WaitForNotificationWithTimeout()`.
+
+#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+
+#include <atomic>
+
+#include "absl/base/macros.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// Notification
-// -----------------------------------------------------------------------------
-class Notification {
- public:
- // Initializes the "notified" state to unnotified.
- Notification() : notified_yet_(false) {}
- explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
- Notification(const Notification&) = delete;
- Notification& operator=(const Notification&) = delete;
- ~Notification();
-
- // Notification::HasBeenNotified()
- //
- // Returns the value of the notification's internal "notified" state.
- bool HasBeenNotified() const {
- return HasBeenNotifiedInternal(&this->notified_yet_);
- }
-
- // Notification::WaitForNotification()
- //
- // Blocks the calling thread until the notification's "notified" state is
- // `true`. Note that if `Notify()` has been previously called on this
- // notification, this function will immediately return.
- void WaitForNotification() const;
-
- // Notification::WaitForNotificationWithTimeout()
- //
- // Blocks until either the notification's "notified" state is `true` (which
- // may occur immediately) or the timeout has elapsed, returning the value of
- // its "notified" state in either case.
- bool WaitForNotificationWithTimeout(absl::Duration timeout) const;
-
- // Notification::WaitForNotificationWithDeadline()
- //
- // Blocks until either the notification's "notified" state is `true` (which
- // may occur immediately) or the deadline has expired, returning the value of
- // its "notified" state in either case.
- bool WaitForNotificationWithDeadline(absl::Time deadline) const;
-
- // Notification::Notify()
- //
- // Sets the "notified" state of this notification to `true` and wakes waiting
- // threads. Note: do not call `Notify()` multiple times on the same
- // `Notification`; calling `Notify()` more than once on the same notification
- // results in undefined behavior.
- void Notify();
-
- private:
- static inline bool HasBeenNotifiedInternal(
- const std::atomic<bool>* notified_yet) {
- return notified_yet->load(std::memory_order_acquire);
- }
-
- mutable Mutex mutex_;
- std::atomic<bool> notified_yet_; // written under mutex_
-};
-
+
+// -----------------------------------------------------------------------------
+// Notification
+// -----------------------------------------------------------------------------
+class Notification {
+ public:
+ // Initializes the "notified" state to unnotified.
+ Notification() : notified_yet_(false) {}
+ explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
+ Notification(const Notification&) = delete;
+ Notification& operator=(const Notification&) = delete;
+ ~Notification();
+
+ // Notification::HasBeenNotified()
+ //
+ // Returns the value of the notification's internal "notified" state.
+ bool HasBeenNotified() const {
+ return HasBeenNotifiedInternal(&this->notified_yet_);
+ }
+
+ // Notification::WaitForNotification()
+ //
+ // Blocks the calling thread until the notification's "notified" state is
+ // `true`. Note that if `Notify()` has been previously called on this
+ // notification, this function will immediately return.
+ void WaitForNotification() const;
+
+ // Notification::WaitForNotificationWithTimeout()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the timeout has elapsed, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithTimeout(absl::Duration timeout) const;
+
+ // Notification::WaitForNotificationWithDeadline()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the deadline has expired, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithDeadline(absl::Time deadline) const;
+
+ // Notification::Notify()
+ //
+ // Sets the "notified" state of this notification to `true` and wakes waiting
+ // threads. Note: do not call `Notify()` multiple times on the same
+ // `Notification`; calling `Notify()` more than once on the same notification
+ // results in undefined behavior.
+ void Notify();
+
+ private:
+ static inline bool HasBeenNotifiedInternal(
+ const std::atomic<bool>* notified_yet) {
+ return notified_yet->load(std::memory_order_acquire);
+ }
+
+ mutable Mutex mutex_;
+ std::atomic<bool> notified_yet_; // written under mutex_
+};
+
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
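A minimal producer/consumer sketch of the `Notification` class above; the payload variable and the thread setup are illustrative:

#include <thread>

#include "absl/synchronization/notification.h"

int main() {
  absl::Notification ready;
  int payload = 0;  // assumed data: written before Notify(), read after the wait

  std::thread producer([&] {
    payload = 42;
    ready.Notify();  // at most once per Notification
  });

  ready.WaitForNotification();  // returns immediately if already notified
  // payload == 42 here, per the memory-ordering note in the header comment.
  producer.join();
  return 0;
}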
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/ya.make b/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
index b95475754d..06f72b69e9 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/ya.make
@@ -1,53 +1,53 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
- contrib/restricted/abseil-cpp/absl/base/log_severity
- contrib/restricted/abseil-cpp/absl/debugging
- contrib/restricted/abseil-cpp/absl/debugging/stacktrace
- contrib/restricted/abseil-cpp/absl/debugging/symbolize
- contrib/restricted/abseil-cpp/absl/demangle
- contrib/restricted/abseil-cpp/absl/numeric
- contrib/restricted/abseil-cpp/absl/strings
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/internal/throw_delegate
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+ contrib/restricted/abseil-cpp/absl/debugging
+ contrib/restricted/abseil-cpp/absl/debugging/stacktrace
+ contrib/restricted/abseil-cpp/absl/debugging/symbolize
+ contrib/restricted/abseil-cpp/absl/demangle
+ contrib/restricted/abseil-cpp/absl/numeric
+ contrib/restricted/abseil-cpp/absl/strings
contrib/restricted/abseil-cpp/absl/strings/internal/absl_strings_internal
- contrib/restricted/abseil-cpp/absl/synchronization/internal
- contrib/restricted/abseil-cpp/absl/time
- contrib/restricted/abseil-cpp/absl/time/civil_time
- contrib/restricted/abseil-cpp/absl/time/time_zone
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+ contrib/restricted/abseil-cpp/absl/synchronization/internal
+ contrib/restricted/abseil-cpp/absl/time
+ contrib/restricted/abseil-cpp/absl/time/civil_time
+ contrib/restricted/abseil-cpp/absl/time/time_zone
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCS(
- barrier.cc
- blocking_counter.cc
- internal/create_thread_identity.cc
- internal/per_thread_sem.cc
- internal/waiter.cc
- mutex.cc
- notification.cc
-)
-
-END()
+SRCS(
+ barrier.cc
+ blocking_counter.cc
+ internal/create_thread_identity.cc
+ internal/per_thread_sem.cc
+ internal/waiter.cc
+ mutex.cc
+ notification.cc
+)
+
+END()