path: root/contrib/restricted/abseil-cpp/absl/synchronization/internal
author    anastasy888 <anastasy888@yandex-team.ru>    2022-02-10 16:45:55 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:45:55 +0300
commit    3a7a498715ef1b66f5054455421b845e45e3a653 (patch)
tree      1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/restricted/abseil-cpp/absl/synchronization/internal
parent    49f765d71da452ea93138a25559dfa68dd76c7f3 (diff)
download  ydb-3a7a498715ef1b66f5054455421b845e45e3a653.tar.gz
Restoring authorship annotation for <anastasy888@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp/absl/synchronization/internal')
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc   270
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h    116
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc             1384
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h               274
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h            252
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc           204
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h            216
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h               182
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc                   816
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h                    290
-rw-r--r--  contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make                      58
11 files changed, 2031 insertions, 2031 deletions
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
index 2d4250f8a8..53a71b342b 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc
@@ -1,140 +1,140 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <new>
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include <string.h>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+#include <new>
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include <string.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// ThreadIdentity storage is persistent; we maintain a free-list of previously
-// released ThreadIdentity objects.
+namespace synchronization_internal {
+
+// ThreadIdentity storage is persistent; we maintain a free-list of previously
+// released ThreadIdentity objects.
ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// Since we must preserve their storage, we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
- base_internal::ThreadIdentity* identity =
- static_cast<base_internal::ThreadIdentity*>(v);
-
- // all_locks might have been allocated by the Mutex implementation.
- // We free it here when we are notified that our thread is dying.
- if (identity->per_thread_synch.all_locks != nullptr) {
- base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
- }
-
- PerThreadSem::Destroy(identity);
-
- // We must explicitly clear the current thread's identity:
- // (a) Subsequent (unrelated) per-thread destructors may require an identity.
- // We must guarantee a new identity is used in this case (this destructor
- // will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
- // (b) ThreadIdentity implementations may depend on memory that is not
- // reinitialized before reuse. We must allow explicit clearing of the
- // association state in this case.
- base_internal::ClearCurrentThreadIdentity();
- {
- base_internal::SpinLockHolder l(&freelist_lock);
- identity->next = thread_identity_freelist;
- thread_identity_freelist = identity;
- }
-}
-
-// Return value rounded up to next multiple of align.
-// Align must be a power of two.
-static intptr_t RoundUp(intptr_t addr, intptr_t align) {
- return (addr + align - 1) & ~(align - 1);
-}
-
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
- base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
- pts->next = nullptr;
- pts->skip = nullptr;
- pts->may_skip = false;
- pts->waitp = nullptr;
- pts->suppress_fatal_errors = false;
- pts->readers = 0;
- pts->priority = 0;
- pts->next_priority_read_cycles = 0;
- pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
- std::memory_order_relaxed);
- pts->maybe_unlocking = false;
- pts->wake = false;
- pts->cond_waiter = false;
- pts->all_locks = nullptr;
- identity->blocked_count_ptr = nullptr;
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
- identity->next = nullptr;
-}
-
-static base_internal::ThreadIdentity* NewThreadIdentity() {
- base_internal::ThreadIdentity* identity = nullptr;
-
- {
- // Re-use a previously released object if possible.
- base_internal::SpinLockHolder l(&freelist_lock);
- if (thread_identity_freelist) {
- identity = thread_identity_freelist; // Take list-head.
- thread_identity_freelist = thread_identity_freelist->next;
- }
- }
-
- if (identity == nullptr) {
- // Allocate enough space to align ThreadIdentity to a multiple of
- // PerThreadSynch::kAlignment. This space is never released (it is
- // added to a freelist by ReclaimThreadIdentity instead).
- void* allocation = base_internal::LowLevelAlloc::Alloc(
- sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
- // Round up the address to the required alignment.
- identity = reinterpret_cast<base_internal::ThreadIdentity*>(
- RoundUp(reinterpret_cast<intptr_t>(allocation),
- base_internal::PerThreadSynch::kAlignment));
- }
- ResetThreadIdentity(identity);
-
- return identity;
-}
-
-// Allocates and attaches a ThreadIdentity object for the calling thread. Returns
-// the new identity.
-// REQUIRES: CurrentThreadIdentity(false) == nullptr
-base_internal::ThreadIdentity* CreateThreadIdentity() {
- base_internal::ThreadIdentity* identity = NewThreadIdentity();
- PerThreadSem::Init(identity);
- // Associate the value with the current thread, and attach our destructor.
- base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
- return identity;
-}
-
-} // namespace synchronization_internal
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// Since we must preserve their storage, we cache them for re-use.
+void ReclaimThreadIdentity(void* v) {
+ base_internal::ThreadIdentity* identity =
+ static_cast<base_internal::ThreadIdentity*>(v);
+
+ // all_locks might have been allocated by the Mutex implementation.
+ // We free it here when we are notified that our thread is dying.
+ if (identity->per_thread_synch.all_locks != nullptr) {
+ base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
+ }
+
+ PerThreadSem::Destroy(identity);
+
+ // We must explicitly clear the current thread's identity:
+ // (a) Subsequent (unrelated) per-thread destructors may require an identity.
+ // We must guarantee a new identity is used in this case (this destructor
+ // will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
+ // (b) ThreadIdentity implementations may depend on memory that is not
+ // reinitialized before reuse. We must allow explicit clearing of the
+ // association state in this case.
+ base_internal::ClearCurrentThreadIdentity();
+ {
+ base_internal::SpinLockHolder l(&freelist_lock);
+ identity->next = thread_identity_freelist;
+ thread_identity_freelist = identity;
+ }
+}
+
+// Return value rounded up to next multiple of align.
+// Align must be a power of two.
+static intptr_t RoundUp(intptr_t addr, intptr_t align) {
+ return (addr + align - 1) & ~(align - 1);
+}
+
+static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+ base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
+ pts->next = nullptr;
+ pts->skip = nullptr;
+ pts->may_skip = false;
+ pts->waitp = nullptr;
+ pts->suppress_fatal_errors = false;
+ pts->readers = 0;
+ pts->priority = 0;
+ pts->next_priority_read_cycles = 0;
+ pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
+ std::memory_order_relaxed);
+ pts->maybe_unlocking = false;
+ pts->wake = false;
+ pts->cond_waiter = false;
+ pts->all_locks = nullptr;
+ identity->blocked_count_ptr = nullptr;
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->next = nullptr;
+}
+
+static base_internal::ThreadIdentity* NewThreadIdentity() {
+ base_internal::ThreadIdentity* identity = nullptr;
+
+ {
+ // Re-use a previously released object if possible.
+ base_internal::SpinLockHolder l(&freelist_lock);
+ if (thread_identity_freelist) {
+ identity = thread_identity_freelist; // Take list-head.
+ thread_identity_freelist = thread_identity_freelist->next;
+ }
+ }
+
+ if (identity == nullptr) {
+ // Allocate enough space to align ThreadIdentity to a multiple of
+ // PerThreadSynch::kAlignment. This space is never released (it is
+ // added to a freelist by ReclaimThreadIdentity instead).
+ void* allocation = base_internal::LowLevelAlloc::Alloc(
+ sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
+ // Round up the address to the required alignment.
+ identity = reinterpret_cast<base_internal::ThreadIdentity*>(
+ RoundUp(reinterpret_cast<intptr_t>(allocation),
+ base_internal::PerThreadSynch::kAlignment));
+ }
+ ResetThreadIdentity(identity);
+
+ return identity;
+}
+
+// Allocates and attaches a ThreadIdentity object for the calling thread. Returns
+// the new identity.
+// REQUIRES: CurrentThreadIdentity(false) == nullptr
+base_internal::ThreadIdentity* CreateThreadIdentity() {
+ base_internal::ThreadIdentity* identity = NewThreadIdentity();
+ PerThreadSem::Init(identity);
+ // Associate the value with the current thread, and attach our destructor.
+ base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
+ return identity;
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
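
NewThreadIdentity() above allocates sizeof(*identity) + PerThreadSynch::kAlignment - 1 bytes and rounds the pointer up because LowLevelAlloc does not promise blocks with that alignment. A minimal standalone sketch of the power-of-two round-up idiom it uses; the test values are illustrative assumptions, not anything from the commit:

// round_up_sketch.cc - the RoundUp() trick from create_thread_identity.cc.
// Valid only when align is a power of two: align - 1 is then a mask of the
// low bits, and & ~(align - 1) clears exactly those bits.
#include <assert.h>
#include <stdint.h>

static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUp(0, 64) == 0);     // already aligned: unchanged
  assert(RoundUp(1, 64) == 64);    // rounds up, never down
  assert(RoundUp(64, 64) == 64);   // exact multiples are fixed points
  assert(RoundUp(65, 64) == 128);  // next multiple after 64
  return 0;
}

Because the slop is exactly align - 1 bytes, the rounded pointer always stays inside the allocation, so no alignment request can ever overrun the block.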
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
index 517c8e49d7..e121f68377 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/create_thread_identity.h
@@ -1,60 +1,60 @@
-/*
- * Copyright 2017 The Abseil Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Interface for getting the current ThreadIdentity, creating one if necessary.
-// See thread_identity.h.
-//
-// This file is separate from thread_identity.h because creating a new
-// ThreadIdentity requires slightly higher level libraries (per_thread_sem
-// and low_level_alloc) than accessing an existing one. This separation allows
-// us to have a smaller //absl/base:base.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/port.h"
-
-namespace absl {
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Interface for getting the current ThreadIdentity, creating one if necessary.
+// See thread_identity.h.
+//
+// This file is separate from thread_identity.h because creating a new
+// ThreadIdentity requires slightly higher level libraries (per_thread_sem
+// and low_level_alloc) than accessing an existing one. This separation allows
+// us to have a smaller //absl/base:base.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/port.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Allocates and attaches a ThreadIdentity object for the calling thread.
-// For private use only.
-base_internal::ThreadIdentity* CreateThreadIdentity();
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
-// Returns the ThreadIdentity object representing the calling thread, guaranteed
-// to be unique for its lifetime. The returned object will remain valid for the
-// program's lifetime, although it may be re-assigned to a subsequent thread.
-// If one does not exist for the calling thread, allocate it now.
-inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
- base_internal::ThreadIdentity* identity =
- base_internal::CurrentThreadIdentityIfPresent();
- if (ABSL_PREDICT_FALSE(identity == nullptr)) {
- return CreateThreadIdentity();
- }
- return identity;
-}
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// Allocates and attaches a ThreadIdentity object for the calling thread.
+// For private use only.
+base_internal::ThreadIdentity* CreateThreadIdentity();
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// For private use only.
+void ReclaimThreadIdentity(void* v);
+
+// Returns the ThreadIdentity object representing the calling thread, guaranteed
+// to be unique for its lifetime. The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist for the calling thread, allocate it now.
+inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
+ base_internal::ThreadIdentity* identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ if (ABSL_PREDICT_FALSE(identity == nullptr)) {
+ return CreateThreadIdentity();
+ }
+ return identity;
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
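
GetOrCreateCurrentThreadIdentity() in this header is the usual TLS get-or-create shape: one thread-local load on the hot path, with allocation pushed into a cold function that runs at most once per thread. A hedged sketch of the same shape using plain thread_local follows; ThreadState, next_id, and the function names are hypothetical stand-ins rather than Abseil APIs, and the thread-exit reclamation that ReclaimThreadIdentity() performs is omitted here:

// tls_get_or_create_sketch.cc - the fast-path/slow-path split, simplified.
#include <atomic>

struct ThreadState { int id; };  // hypothetical per-thread payload

static std::atomic<int> next_id{0};

// Cold path: runs at most once per thread.
static ThreadState* CreateThreadState() {
  return new ThreadState{next_id.fetch_add(1, std::memory_order_relaxed)};
}

// Hot path: a single thread-local load plus a well-predicted branch.
ThreadState* GetOrCreateThreadState() {
  static thread_local ThreadState* state = nullptr;
  if (state == nullptr) {
    state = CreateThreadState();  // first call on this thread only
  }
  return state;
}

Keeping the slow path out of line is what ABSL_PREDICT_FALSE expresses in the real header: the branch is essentially free once every thread has its identity.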
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
index e0b4f0454d..27fec21681 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.cc
@@ -1,698 +1,698 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// GraphCycles provides incremental cycle detection on a dynamic
-// graph using the following algorithm:
-//
-// A dynamic topological sort algorithm for directed acyclic graphs
-// David J. Pearce, Paul H. J. Kelly
- // Journal of Experimental Algorithmics (JEA)
-// Volume 11, 2006, Article No. 1.7
-//
-// Brief summary of the algorithm:
-//
-// (1) Maintain a rank for each node that is consistent
-// with the topological sort of the graph. I.e., path from x to y
-// implies rank[x] < rank[y].
-// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
-// (3) Otherwise: adjust ranks in the neighborhood of x and y.
-
-#include "absl/base/attributes.h"
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/graphcycles.h"
-
-#include <algorithm>
-#include <array>
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// GraphCycles provides incremental cycle detection on a dynamic
+// graph using the following algorithm:
+//
+// A dynamic topological sort algorithm for directed acyclic graphs
+// David J. Pearce, Paul H. J. Kelly
+ // Journal of Experimental Algorithmics (JEA)
+// Volume 11, 2006, Article No. 1.7
+//
+// Brief summary of the algorithm:
+//
+// (1) Maintain a rank for each node that is consistent
+// with the topological sort of the graph. I.e., path from x to y
+// implies rank[x] < rank[y].
+// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
+// (3) Otherwise: adjust ranks in the neighborhood of x and y.
+
+#include "absl/base/attributes.h"
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/graphcycles.h"
+
+#include <algorithm>
+#include <array>
#include <limits>
-#include "absl/base/internal/hide_ptr.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-
-// Do not use STL. This module does not use standard memory allocation.
-
-namespace absl {
+#include "absl/base/internal/hide_ptr.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// Do not use STL. This module does not use standard memory allocation.
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-namespace {
-
-// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
-// which people are doing things like acquiring Mutexes.
+namespace synchronization_internal {
+
+namespace {
+
+// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
+// which people are doing things like acquiring Mutexes.
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
-
-static void InitArenaIfNecessary() {
- arena_mu.Lock();
- if (arena == nullptr) {
- arena = base_internal::LowLevelAlloc::NewArena(0);
- }
- arena_mu.Unlock();
-}
-
-// Number of inlined elements in Vec. Hash table implementation
-// relies on this being a power of two.
-static const uint32_t kInline = 8;
-
-// A simple LowLevelAlloc based resizable vector with inlined storage
-// for a few elements. T must be a plain type since constructor
-// and destructor are not run on elements of type T managed by Vec.
-template <typename T>
-class Vec {
- public:
- Vec() { Init(); }
- ~Vec() { Discard(); }
-
- void clear() {
- Discard();
- Init();
- }
-
- bool empty() const { return size_ == 0; }
- uint32_t size() const { return size_; }
- T* begin() { return ptr_; }
- T* end() { return ptr_ + size_; }
- const T& operator[](uint32_t i) const { return ptr_[i]; }
- T& operator[](uint32_t i) { return ptr_[i]; }
- const T& back() const { return ptr_[size_-1]; }
- void pop_back() { size_--; }
-
- void push_back(const T& v) {
- if (size_ == capacity_) Grow(size_ + 1);
- ptr_[size_] = v;
- size_++;
- }
-
- void resize(uint32_t n) {
- if (n > capacity_) Grow(n);
- size_ = n;
- }
-
- void fill(const T& val) {
- for (uint32_t i = 0; i < size(); i++) {
- ptr_[i] = val;
- }
- }
-
- // Guarantees src is empty at end.
- // Provided for the hash table resizing code below.
- void MoveFrom(Vec<T>* src) {
- if (src->ptr_ == src->space_) {
- // Need to actually copy
- resize(src->size_);
- std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
- src->size_ = 0;
- } else {
- Discard();
- ptr_ = src->ptr_;
- size_ = src->size_;
- capacity_ = src->capacity_;
- src->Init();
- }
- }
-
- private:
- T* ptr_;
- T space_[kInline];
- uint32_t size_;
- uint32_t capacity_;
-
- void Init() {
- ptr_ = space_;
- size_ = 0;
- capacity_ = kInline;
- }
-
- void Discard() {
- if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
- }
-
- void Grow(uint32_t n) {
- while (capacity_ < n) {
- capacity_ *= 2;
- }
- size_t request = static_cast<size_t>(capacity_) * sizeof(T);
- T* copy = static_cast<T*>(
- base_internal::LowLevelAlloc::AllocWithArena(request, arena));
- std::copy(ptr_, ptr_ + size_, copy);
- Discard();
- ptr_ = copy;
- }
-
- Vec(const Vec&) = delete;
- Vec& operator=(const Vec&) = delete;
-};
-
-// A hash set of non-negative int32_t that uses Vec for its underlying storage.
-class NodeSet {
- public:
- NodeSet() { Init(); }
-
- void clear() { Init(); }
- bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
-
- bool insert(int32_t v) {
- uint32_t i = FindIndex(v);
- if (table_[i] == v) {
- return false;
- }
- if (table_[i] == kEmpty) {
- // Only inserting over an empty cell increases the number of occupied
- // slots.
- occupied_++;
- }
- table_[i] = v;
- // Double when 75% full.
- if (occupied_ >= table_.size() - table_.size()/4) Grow();
- return true;
- }
-
- void erase(uint32_t v) {
- uint32_t i = FindIndex(v);
- if (static_cast<uint32_t>(table_[i]) == v) {
- table_[i] = kDel;
- }
- }
-
- // Iteration is done via HASH_FOR_EACH.
- // Example:
- // HASH_FOR_EACH(elem, node->out) { ... }
-#define HASH_FOR_EACH(elem, eset) \
- for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
- bool Next(int32_t* cursor, int32_t* elem) {
- while (static_cast<uint32_t>(*cursor) < table_.size()) {
- int32_t v = table_[*cursor];
- (*cursor)++;
- if (v >= 0) {
- *elem = v;
- return true;
- }
- }
- return false;
- }
-
- private:
- enum : int32_t { kEmpty = -1, kDel = -2 };
- Vec<int32_t> table_;
- uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
-
- static uint32_t Hash(uint32_t a) { return a * 41; }
-
- // Return index for storing v. May return an empty index or deleted index
- int FindIndex(int32_t v) const {
- // Search starting at hash index.
- const uint32_t mask = table_.size() - 1;
- uint32_t i = Hash(v) & mask;
- int deleted_index = -1; // If >= 0, index of first deleted element we see
- while (true) {
- int32_t e = table_[i];
- if (v == e) {
- return i;
- } else if (e == kEmpty) {
- // Return any previously encountered deleted slot.
- return (deleted_index >= 0) ? deleted_index : i;
- } else if (e == kDel && deleted_index < 0) {
- // Keep searching since v might be present later.
- deleted_index = i;
- }
- i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
- }
- }
-
- void Init() {
- table_.clear();
- table_.resize(kInline);
- table_.fill(kEmpty);
- occupied_ = 0;
- }
-
- void Grow() {
- Vec<int32_t> copy;
- copy.MoveFrom(&table_);
- occupied_ = 0;
- table_.resize(copy.size() * 2);
- table_.fill(kEmpty);
-
- for (const auto& e : copy) {
- if (e >= 0) insert(e);
- }
- }
-
- NodeSet(const NodeSet&) = delete;
- NodeSet& operator=(const NodeSet&) = delete;
-};
-
-// We encode a node index and a node version in GraphId. The version
-// number is incremented when the GraphId is freed, which automatically
-// invalidates all copies of the GraphId.
-
-inline GraphId MakeId(int32_t index, uint32_t version) {
- GraphId g;
- g.handle =
- (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
- return g;
-}
-
-inline int32_t NodeIndex(GraphId id) {
- return static_cast<uint32_t>(id.handle & 0xfffffffful);
-}
-
-inline uint32_t NodeVersion(GraphId id) {
- return static_cast<uint32_t>(id.handle >> 32);
-}
-
-struct Node {
- int32_t rank; // rank number assigned by Pearce-Kelly algorithm
- uint32_t version; // Current version number
- int32_t next_hash; // Next entry in hash table
- bool visited; // Temporary marker used by depth-first-search
- uintptr_t masked_ptr; // User-supplied pointer
- NodeSet in; // List of immediate predecessor nodes in graph
- NodeSet out; // List of immediate successor nodes in graph
- int priority; // Priority of recorded stack trace.
- int nstack; // Depth of recorded stack trace.
- void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
-};
-
-// Hash table for pointer to node index lookups.
-class PointerMap {
- public:
- explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
- table_.fill(-1);
- }
-
- int32_t Find(void* ptr) {
- auto masked = base_internal::HidePtr(ptr);
- for (int32_t i = table_[Hash(ptr)]; i != -1;) {
- Node* n = (*nodes_)[i];
- if (n->masked_ptr == masked) return i;
- i = n->next_hash;
- }
- return -1;
- }
-
- void Add(void* ptr, int32_t i) {
- int32_t* head = &table_[Hash(ptr)];
- (*nodes_)[i]->next_hash = *head;
- *head = i;
- }
-
- int32_t Remove(void* ptr) {
- // Advance through linked list while keeping track of the
- // predecessor slot that points to the current entry.
- auto masked = base_internal::HidePtr(ptr);
- for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
- int32_t index = *slot;
- Node* n = (*nodes_)[index];
- if (n->masked_ptr == masked) {
- *slot = n->next_hash; // Remove n from linked list
- n->next_hash = -1;
- return index;
- }
- slot = &n->next_hash;
- }
- return -1;
- }
-
- private:
- // Number of buckets in hash table for pointer lookups.
- static constexpr uint32_t kHashTableSize = 8171; // should be prime
-
- const Vec<Node*>* nodes_;
- std::array<int32_t, kHashTableSize> table_;
-
- static uint32_t Hash(void* ptr) {
- return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
- }
-};
-
-} // namespace
-
-struct GraphCycles::Rep {
- Vec<Node*> nodes_;
- Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
- PointerMap ptrmap_;
-
- // Temporary state.
- Vec<int32_t> deltaf_; // Results of forward DFS
- Vec<int32_t> deltab_; // Results of backward DFS
- Vec<int32_t> list_; // All nodes to reprocess
- Vec<int32_t> merged_; // Rank values to assign to list_ entries
- Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
-
- Rep() : ptrmap_(&nodes_) {}
-};
-
-static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
- Node* n = rep->nodes_[NodeIndex(id)];
- return (n->version == NodeVersion(id)) ? n : nullptr;
-}
-
-GraphCycles::GraphCycles() {
- InitArenaIfNecessary();
- rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
- Rep;
-}
-
-GraphCycles::~GraphCycles() {
- for (auto* node : rep_->nodes_) {
- node->Node::~Node();
- base_internal::LowLevelAlloc::Free(node);
- }
- rep_->Rep::~Rep();
- base_internal::LowLevelAlloc::Free(rep_);
-}
-
-bool GraphCycles::CheckInvariants() const {
- Rep* r = rep_;
- NodeSet ranks; // Set of ranks seen so far.
- for (uint32_t x = 0; x < r->nodes_.size(); x++) {
- Node* nx = r->nodes_[x];
- void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
- if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
- ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
- }
- if (nx->visited) {
- ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
- }
- if (!ranks.insert(nx->rank)) {
- ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
- }
- HASH_FOR_EACH(y, nx->out) {
- Node* ny = r->nodes_[y];
- if (nx->rank >= ny->rank) {
- ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
- nx->rank, ny->rank);
- }
- }
- }
- return true;
-}
-
-GraphId GraphCycles::GetId(void* ptr) {
- int32_t i = rep_->ptrmap_.Find(ptr);
- if (i != -1) {
- return MakeId(i, rep_->nodes_[i]->version);
- } else if (rep_->free_nodes_.empty()) {
- Node* n =
- new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
- Node;
- n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
- n->visited = false;
- n->rank = rep_->nodes_.size();
- n->masked_ptr = base_internal::HidePtr(ptr);
- n->nstack = 0;
- n->priority = 0;
- rep_->nodes_.push_back(n);
- rep_->ptrmap_.Add(ptr, n->rank);
- return MakeId(n->rank, n->version);
- } else {
- // Preserve preceding rank since the set of ranks in use must be
- // a permutation of [0,rep_->nodes_.size()-1].
- int32_t r = rep_->free_nodes_.back();
- rep_->free_nodes_.pop_back();
- Node* n = rep_->nodes_[r];
- n->masked_ptr = base_internal::HidePtr(ptr);
- n->nstack = 0;
- n->priority = 0;
- rep_->ptrmap_.Add(ptr, r);
- return MakeId(r, n->version);
- }
-}
-
-void GraphCycles::RemoveNode(void* ptr) {
- int32_t i = rep_->ptrmap_.Remove(ptr);
- if (i == -1) {
- return;
- }
- Node* x = rep_->nodes_[i];
- HASH_FOR_EACH(y, x->out) {
- rep_->nodes_[y]->in.erase(i);
- }
- HASH_FOR_EACH(y, x->in) {
- rep_->nodes_[y]->out.erase(i);
- }
- x->in.clear();
- x->out.clear();
- x->masked_ptr = base_internal::HidePtr<void>(nullptr);
- if (x->version == std::numeric_limits<uint32_t>::max()) {
- // Cannot use x any more
- } else {
- x->version++; // Invalidates all copies of node.
- rep_->free_nodes_.push_back(i);
- }
-}
-
-void* GraphCycles::Ptr(GraphId id) {
- Node* n = FindNode(rep_, id);
- return n == nullptr ? nullptr
- : base_internal::UnhidePtr<void>(n->masked_ptr);
-}
-
-bool GraphCycles::HasNode(GraphId node) {
- return FindNode(rep_, node) != nullptr;
-}
-
-bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
- Node* xn = FindNode(rep_, x);
- return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
-}
-
-void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
- Node* xn = FindNode(rep_, x);
- Node* yn = FindNode(rep_, y);
- if (xn && yn) {
- xn->out.erase(NodeIndex(y));
- yn->in.erase(NodeIndex(x));
- // No need to update the rank assignment since a previous valid
- // rank assignment remains valid after an edge deletion.
- }
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
-static void Reorder(GraphCycles::Rep* r);
-static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
-static void MoveToList(
- GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
-
-bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
- Rep* r = rep_;
- const int32_t x = NodeIndex(idx);
- const int32_t y = NodeIndex(idy);
- Node* nx = FindNode(r, idx);
- Node* ny = FindNode(r, idy);
- if (nx == nullptr || ny == nullptr) return true; // Expired ids
-
- if (nx == ny) return false; // Self edge
- if (!nx->out.insert(y)) {
- // Edge already exists.
- return true;
- }
-
- ny->in.insert(x);
-
- if (nx->rank <= ny->rank) {
- // New edge is consistent with existing rank assignment.
- return true;
- }
-
- // Current rank assignments are incompatible with the new edge. Recompute.
- // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
- if (!ForwardDFS(r, y, nx->rank)) {
- // Found a cycle. Undo the insertion and tell caller.
- nx->out.erase(y);
- ny->in.erase(x);
- // Since we do not call Reorder() on this path, clear any visited
- // markers left by ForwardDFS.
- for (const auto& d : r->deltaf_) {
- r->nodes_[d]->visited = false;
- }
- return false;
- }
- BackwardDFS(r, x, ny->rank);
- Reorder(r);
- return true;
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
- // Avoid recursion since stack space might be limited.
- // We instead keep a stack of nodes to visit.
- r->deltaf_.clear();
- r->stack_.clear();
- r->stack_.push_back(n);
- while (!r->stack_.empty()) {
- n = r->stack_.back();
- r->stack_.pop_back();
- Node* nn = r->nodes_[n];
- if (nn->visited) continue;
-
- nn->visited = true;
- r->deltaf_.push_back(n);
-
- HASH_FOR_EACH(w, nn->out) {
- Node* nw = r->nodes_[w];
- if (nw->rank == upper_bound) {
- return false; // Cycle
- }
- if (!nw->visited && nw->rank < upper_bound) {
- r->stack_.push_back(w);
- }
- }
- }
- return true;
-}
-
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
- r->deltab_.clear();
- r->stack_.clear();
- r->stack_.push_back(n);
- while (!r->stack_.empty()) {
- n = r->stack_.back();
- r->stack_.pop_back();
- Node* nn = r->nodes_[n];
- if (nn->visited) continue;
-
- nn->visited = true;
- r->deltab_.push_back(n);
-
- HASH_FOR_EACH(w, nn->in) {
- Node* nw = r->nodes_[w];
- if (!nw->visited && lower_bound < nw->rank) {
- r->stack_.push_back(w);
- }
- }
- }
-}
-
-static void Reorder(GraphCycles::Rep* r) {
- Sort(r->nodes_, &r->deltab_);
- Sort(r->nodes_, &r->deltaf_);
-
- // Adds contents of delta lists to list_ (backwards deltas first).
- r->list_.clear();
- MoveToList(r, &r->deltab_, &r->list_);
- MoveToList(r, &r->deltaf_, &r->list_);
-
- // Produce sorted list of all ranks that will be reassigned.
- r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
- std::merge(r->deltab_.begin(), r->deltab_.end(),
- r->deltaf_.begin(), r->deltaf_.end(),
- r->merged_.begin());
-
- // Assign the ranks in order to the collected list.
- for (uint32_t i = 0; i < r->list_.size(); i++) {
- r->nodes_[r->list_[i]]->rank = r->merged_[i];
- }
-}
-
-static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
- struct ByRank {
- const Vec<Node*>* nodes;
- bool operator()(int32_t a, int32_t b) const {
- return (*nodes)[a]->rank < (*nodes)[b]->rank;
- }
- };
- ByRank cmp;
- cmp.nodes = &nodes;
- std::sort(delta->begin(), delta->end(), cmp);
-}
-
-static void MoveToList(
- GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
- for (auto& v : *src) {
- int32_t w = v;
- v = r->nodes_[w]->rank; // Replace v entry with its rank
- r->nodes_[w]->visited = false; // Prepare for future DFS calls
- dst->push_back(w);
- }
-}
-
-int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
- GraphId path[]) const {
- Rep* r = rep_;
- if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
- const int32_t x = NodeIndex(idx);
- const int32_t y = NodeIndex(idy);
-
- // Forward depth first search starting at x until we hit y.
- // As we descend into a node, we push it onto the path.
- // As we leave a node, we remove it from the path.
- int path_len = 0;
-
- NodeSet seen;
- r->stack_.clear();
- r->stack_.push_back(x);
- while (!r->stack_.empty()) {
- int32_t n = r->stack_.back();
- r->stack_.pop_back();
- if (n < 0) {
- // Marker to indicate that we are leaving a node
- path_len--;
- continue;
- }
-
- if (path_len < max_path_len) {
- path[path_len] = MakeId(n, rep_->nodes_[n]->version);
- }
- path_len++;
- r->stack_.push_back(-1); // Will remove tentative path entry
-
- if (n == y) {
- return path_len;
- }
-
- HASH_FOR_EACH(w, r->nodes_[n]->out) {
- if (seen.insert(w)) {
- r->stack_.push_back(w);
- }
- }
- }
-
- return 0;
-}
-
-bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
- return FindPath(x, y, 0, nullptr) > 0;
-}
-
-void GraphCycles::UpdateStackTrace(GraphId id, int priority,
- int (*get_stack_trace)(void** stack, int)) {
- Node* n = FindNode(rep_, id);
- if (n == nullptr || n->priority >= priority) {
- return;
- }
- n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
- n->priority = priority;
-}
-
-int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
- Node* n = FindNode(rep_, id);
- if (n == nullptr) {
- *ptr = nullptr;
- return 0;
- } else {
- *ptr = n->stack;
- return n->nstack;
- }
-}
-
-} // namespace synchronization_internal
+
+static void InitArenaIfNecessary() {
+ arena_mu.Lock();
+ if (arena == nullptr) {
+ arena = base_internal::LowLevelAlloc::NewArena(0);
+ }
+ arena_mu.Unlock();
+}
+
+// Number of inlined elements in Vec. Hash table implementation
+// relies on this being a power of two.
+static const uint32_t kInline = 8;
+
+// A simple LowLevelAlloc based resizable vector with inlined storage
+// for a few elements. T must be a plain type since constructor
+// and destructor are not run on elements of type T managed by Vec.
+template <typename T>
+class Vec {
+ public:
+ Vec() { Init(); }
+ ~Vec() { Discard(); }
+
+ void clear() {
+ Discard();
+ Init();
+ }
+
+ bool empty() const { return size_ == 0; }
+ uint32_t size() const { return size_; }
+ T* begin() { return ptr_; }
+ T* end() { return ptr_ + size_; }
+ const T& operator[](uint32_t i) const { return ptr_[i]; }
+ T& operator[](uint32_t i) { return ptr_[i]; }
+ const T& back() const { return ptr_[size_-1]; }
+ void pop_back() { size_--; }
+
+ void push_back(const T& v) {
+ if (size_ == capacity_) Grow(size_ + 1);
+ ptr_[size_] = v;
+ size_++;
+ }
+
+ void resize(uint32_t n) {
+ if (n > capacity_) Grow(n);
+ size_ = n;
+ }
+
+ void fill(const T& val) {
+ for (uint32_t i = 0; i < size(); i++) {
+ ptr_[i] = val;
+ }
+ }
+
+ // Guarantees src is empty at end.
+ // Provided for the hash table resizing code below.
+ void MoveFrom(Vec<T>* src) {
+ if (src->ptr_ == src->space_) {
+ // Need to actually copy
+ resize(src->size_);
+ std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ src->size_ = 0;
+ } else {
+ Discard();
+ ptr_ = src->ptr_;
+ size_ = src->size_;
+ capacity_ = src->capacity_;
+ src->Init();
+ }
+ }
+
+ private:
+ T* ptr_;
+ T space_[kInline];
+ uint32_t size_;
+ uint32_t capacity_;
+
+ void Init() {
+ ptr_ = space_;
+ size_ = 0;
+ capacity_ = kInline;
+ }
+
+ void Discard() {
+ if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
+ }
+
+ void Grow(uint32_t n) {
+ while (capacity_ < n) {
+ capacity_ *= 2;
+ }
+ size_t request = static_cast<size_t>(capacity_) * sizeof(T);
+ T* copy = static_cast<T*>(
+ base_internal::LowLevelAlloc::AllocWithArena(request, arena));
+ std::copy(ptr_, ptr_ + size_, copy);
+ Discard();
+ ptr_ = copy;
+ }
+
+ Vec(const Vec&) = delete;
+ Vec& operator=(const Vec&) = delete;
+};
+
+// A hash set of non-negative int32_t that uses Vec for its underlying storage.
+class NodeSet {
+ public:
+ NodeSet() { Init(); }
+
+ void clear() { Init(); }
+ bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
+
+ bool insert(int32_t v) {
+ uint32_t i = FindIndex(v);
+ if (table_[i] == v) {
+ return false;
+ }
+ if (table_[i] == kEmpty) {
+ // Only inserting over an empty cell increases the number of occupied
+ // slots.
+ occupied_++;
+ }
+ table_[i] = v;
+ // Double when 75% full.
+ if (occupied_ >= table_.size() - table_.size()/4) Grow();
+ return true;
+ }
+
+ void erase(uint32_t v) {
+ uint32_t i = FindIndex(v);
+ if (static_cast<uint32_t>(table_[i]) == v) {
+ table_[i] = kDel;
+ }
+ }
+
+ // Iteration is done via HASH_FOR_EACH.
+ // Example:
+ // HASH_FOR_EACH(elem, node->out) { ... }
+#define HASH_FOR_EACH(elem, eset) \
+ for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
+ bool Next(int32_t* cursor, int32_t* elem) {
+ while (static_cast<uint32_t>(*cursor) < table_.size()) {
+ int32_t v = table_[*cursor];
+ (*cursor)++;
+ if (v >= 0) {
+ *elem = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private:
+ enum : int32_t { kEmpty = -1, kDel = -2 };
+ Vec<int32_t> table_;
+ uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
+
+ static uint32_t Hash(uint32_t a) { return a * 41; }
+
+ // Return index for storing v. May return an empty index or deleted index
+ int FindIndex(int32_t v) const {
+ // Search starting at hash index.
+ const uint32_t mask = table_.size() - 1;
+ uint32_t i = Hash(v) & mask;
+ int deleted_index = -1; // If >= 0, index of first deleted element we see
+ while (true) {
+ int32_t e = table_[i];
+ if (v == e) {
+ return i;
+ } else if (e == kEmpty) {
+ // Return any previously encountered deleted slot.
+ return (deleted_index >= 0) ? deleted_index : i;
+ } else if (e == kDel && deleted_index < 0) {
+ // Keep searching since v might be present later.
+ deleted_index = i;
+ }
+ i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
+ }
+ }
+
+ void Init() {
+ table_.clear();
+ table_.resize(kInline);
+ table_.fill(kEmpty);
+ occupied_ = 0;
+ }
+
+ void Grow() {
+ Vec<int32_t> copy;
+ copy.MoveFrom(&table_);
+ occupied_ = 0;
+ table_.resize(copy.size() * 2);
+ table_.fill(kEmpty);
+
+ for (const auto& e : copy) {
+ if (e >= 0) insert(e);
+ }
+ }
+
+ NodeSet(const NodeSet&) = delete;
+ NodeSet& operator=(const NodeSet&) = delete;
+};
+
+// We encode a node index and a node version in GraphId. The version
+// number is incremented when the GraphId is freed, which automatically
+// invalidates all copies of the GraphId.
+
+inline GraphId MakeId(int32_t index, uint32_t version) {
+ GraphId g;
+ g.handle =
+ (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
+ return g;
+}
+
+inline int32_t NodeIndex(GraphId id) {
+ return static_cast<uint32_t>(id.handle & 0xfffffffful);
+}
+
+inline uint32_t NodeVersion(GraphId id) {
+ return static_cast<uint32_t>(id.handle >> 32);
+}
+
+struct Node {
+ int32_t rank; // rank number assigned by Pearce-Kelly algorithm
+ uint32_t version; // Current version number
+ int32_t next_hash; // Next entry in hash table
+ bool visited; // Temporary marker used by depth-first-search
+ uintptr_t masked_ptr; // User-supplied pointer
+ NodeSet in; // List of immediate predecessor nodes in graph
+ NodeSet out; // List of immediate successor nodes in graph
+ int priority; // Priority of recorded stack trace.
+ int nstack; // Depth of recorded stack trace.
+ void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
+};
+
+// Hash table for pointer to node index lookups.
+class PointerMap {
+ public:
+ explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
+ table_.fill(-1);
+ }
+
+ int32_t Find(void* ptr) {
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t i = table_[Hash(ptr)]; i != -1;) {
+ Node* n = (*nodes_)[i];
+ if (n->masked_ptr == masked) return i;
+ i = n->next_hash;
+ }
+ return -1;
+ }
+
+ void Add(void* ptr, int32_t i) {
+ int32_t* head = &table_[Hash(ptr)];
+ (*nodes_)[i]->next_hash = *head;
+ *head = i;
+ }
+
+ int32_t Remove(void* ptr) {
+ // Advance through linked list while keeping track of the
+ // predecessor slot that points to the current entry.
+ auto masked = base_internal::HidePtr(ptr);
+ for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
+ int32_t index = *slot;
+ Node* n = (*nodes_)[index];
+ if (n->masked_ptr == masked) {
+ *slot = n->next_hash; // Remove n from linked list
+ n->next_hash = -1;
+ return index;
+ }
+ slot = &n->next_hash;
+ }
+ return -1;
+ }
+
+ private:
+ // Number of buckets in hash table for pointer lookups.
+ static constexpr uint32_t kHashTableSize = 8171; // should be prime
+
+ const Vec<Node*>* nodes_;
+ std::array<int32_t, kHashTableSize> table_;
+
+ static uint32_t Hash(void* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
+ }
+};
+
+} // namespace
+
+struct GraphCycles::Rep {
+ Vec<Node*> nodes_;
+ Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
+ PointerMap ptrmap_;
+
+ // Temporary state.
+ Vec<int32_t> deltaf_; // Results of forward DFS
+ Vec<int32_t> deltab_; // Results of backward DFS
+ Vec<int32_t> list_; // All nodes to reprocess
+ Vec<int32_t> merged_; // Rank values to assign to list_ entries
+ Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
+
+ Rep() : ptrmap_(&nodes_) {}
+};
+
+static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
+ Node* n = rep->nodes_[NodeIndex(id)];
+ return (n->version == NodeVersion(id)) ? n : nullptr;
+}
+
+GraphCycles::GraphCycles() {
+ InitArenaIfNecessary();
+ rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
+ Rep;
+}
+
+GraphCycles::~GraphCycles() {
+ for (auto* node : rep_->nodes_) {
+ node->Node::~Node();
+ base_internal::LowLevelAlloc::Free(node);
+ }
+ rep_->Rep::~Rep();
+ base_internal::LowLevelAlloc::Free(rep_);
+}
+
+bool GraphCycles::CheckInvariants() const {
+ Rep* r = rep_;
+ NodeSet ranks; // Set of ranks seen so far.
+ for (uint32_t x = 0; x < r->nodes_.size(); x++) {
+ Node* nx = r->nodes_[x];
+ void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
+ if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ }
+ if (nx->visited) {
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ }
+ if (!ranks.insert(nx->rank)) {
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ }
+ HASH_FOR_EACH(y, nx->out) {
+ Node* ny = r->nodes_[y];
+ if (nx->rank >= ny->rank) {
+ ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
+ nx->rank, ny->rank);
+ }
+ }
+ }
+ return true;
+}
+
+GraphId GraphCycles::GetId(void* ptr) {
+ int32_t i = rep_->ptrmap_.Find(ptr);
+ if (i != -1) {
+ return MakeId(i, rep_->nodes_[i]->version);
+ } else if (rep_->free_nodes_.empty()) {
+ Node* n =
+ new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
+ Node;
+ n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
+ n->visited = false;
+ n->rank = rep_->nodes_.size();
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->nodes_.push_back(n);
+ rep_->ptrmap_.Add(ptr, n->rank);
+ return MakeId(n->rank, n->version);
+ } else {
+ // Preserve preceding rank since the set of ranks in use must be
+ // a permutation of [0,rep_->nodes_.size()-1].
+ int32_t r = rep_->free_nodes_.back();
+ rep_->free_nodes_.pop_back();
+ Node* n = rep_->nodes_[r];
+ n->masked_ptr = base_internal::HidePtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->ptrmap_.Add(ptr, r);
+ return MakeId(r, n->version);
+ }
+}
+
+void GraphCycles::RemoveNode(void* ptr) {
+ int32_t i = rep_->ptrmap_.Remove(ptr);
+ if (i == -1) {
+ return;
+ }
+ Node* x = rep_->nodes_[i];
+ HASH_FOR_EACH(y, x->out) {
+ rep_->nodes_[y]->in.erase(i);
+ }
+ HASH_FOR_EACH(y, x->in) {
+ rep_->nodes_[y]->out.erase(i);
+ }
+ x->in.clear();
+ x->out.clear();
+ x->masked_ptr = base_internal::HidePtr<void>(nullptr);
+ if (x->version == std::numeric_limits<uint32_t>::max()) {
+ // Cannot use x any more
+ } else {
+ x->version++; // Invalidates all copies of node.
+ rep_->free_nodes_.push_back(i);
+ }
+}
+
+void* GraphCycles::Ptr(GraphId id) {
+ Node* n = FindNode(rep_, id);
+ return n == nullptr ? nullptr
+ : base_internal::UnhidePtr<void>(n->masked_ptr);
+}
+
+bool GraphCycles::HasNode(GraphId node) {
+ return FindNode(rep_, node) != nullptr;
+}
+
+bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
+ Node* xn = FindNode(rep_, x);
+ return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
+}
+
+void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
+ Node* xn = FindNode(rep_, x);
+ Node* yn = FindNode(rep_, y);
+ if (xn && yn) {
+ xn->out.erase(NodeIndex(y));
+ yn->in.erase(NodeIndex(x));
+ // No need to update the rank assignment since a previous valid
+ // rank assignment remains valid after an edge deletion.
+ }
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
+static void Reorder(GraphCycles::Rep* r);
+static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
+
+bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
+ Rep* r = rep_;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+ Node* nx = FindNode(r, idx);
+ Node* ny = FindNode(r, idy);
+ if (nx == nullptr || ny == nullptr) return true; // Expired ids
+
+ if (nx == ny) return false; // Self edge
+ if (!nx->out.insert(y)) {
+ // Edge already exists.
+ return true;
+ }
+
+ ny->in.insert(x);
+
+ if (nx->rank <= ny->rank) {
+ // New edge is consistent with existing rank assignment.
+ return true;
+ }
+
+ // Current rank assignments are incompatible with the new edge. Recompute.
+ // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
+ if (!ForwardDFS(r, y, nx->rank)) {
+ // Found a cycle. Undo the insertion and tell caller.
+ nx->out.erase(y);
+ ny->in.erase(x);
+ // Since we do not call Reorder() on this path, clear any visited
+ // markers left by ForwardDFS.
+ for (const auto& d : r->deltaf_) {
+ r->nodes_[d]->visited = false;
+ }
+ return false;
+ }
+ BackwardDFS(r, x, ny->rank);
+ Reorder(r);
+ return true;
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
+ // Avoid recursion since stack space might be limited.
+ // We instead keep a stack of nodes to visit.
+ r->deltaf_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltaf_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->out) {
+ Node* nw = r->nodes_[w];
+ if (nw->rank == upper_bound) {
+ return false; // Cycle
+ }
+ if (!nw->visited && nw->rank < upper_bound) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+ return true;
+}
+
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
+ r->deltab_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltab_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->in) {
+ Node* nw = r->nodes_[w];
+ if (!nw->visited && lower_bound < nw->rank) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+}
+
+static void Reorder(GraphCycles::Rep* r) {
+ Sort(r->nodes_, &r->deltab_);
+ Sort(r->nodes_, &r->deltaf_);
+
+ // Adds contents of delta lists to list_ (backwards deltas first).
+ r->list_.clear();
+ MoveToList(r, &r->deltab_, &r->list_);
+ MoveToList(r, &r->deltaf_, &r->list_);
+
+ // Produce sorted list of all ranks that will be reassigned.
+ r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
+ std::merge(r->deltab_.begin(), r->deltab_.end(),
+ r->deltaf_.begin(), r->deltaf_.end(),
+ r->merged_.begin());
+
+ // Assign the ranks in order to the collected list.
+ for (uint32_t i = 0; i < r->list_.size(); i++) {
+ r->nodes_[r->list_[i]]->rank = r->merged_[i];
+ }
+}
+
+static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
+ struct ByRank {
+ const Vec<Node*>* nodes;
+ bool operator()(int32_t a, int32_t b) const {
+ return (*nodes)[a]->rank < (*nodes)[b]->rank;
+ }
+ };
+ ByRank cmp;
+ cmp.nodes = &nodes;
+ std::sort(delta->begin(), delta->end(), cmp);
+}
+
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
+ for (auto& v : *src) {
+ int32_t w = v;
+ v = r->nodes_[w]->rank; // Replace v entry with its rank
+ r->nodes_[w]->visited = false; // Prepare for future DFS calls
+ dst->push_back(w);
+ }
+}
+
+int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
+ GraphId path[]) const {
+ Rep* r = rep_;
+ if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+
+ // Forward depth first search starting at x until we hit y.
+ // As we descend into a node, we push it onto the path.
+ // As we leave a node, we remove it from the path.
+ int path_len = 0;
+
+ NodeSet seen;
+ r->stack_.clear();
+ r->stack_.push_back(x);
+ while (!r->stack_.empty()) {
+ int32_t n = r->stack_.back();
+ r->stack_.pop_back();
+ if (n < 0) {
+ // Marker to indicate that we are leaving a node
+ path_len--;
+ continue;
+ }
+
+ if (path_len < max_path_len) {
+ path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+ }
+ path_len++;
+ r->stack_.push_back(-1); // Will remove tentative path entry
+
+ if (n == y) {
+ return path_len;
+ }
+
+ HASH_FOR_EACH(w, r->nodes_[n]->out) {
+ if (seen.insert(w)) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+
+ return 0;
+}
+
+bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
+ return FindPath(x, y, 0, nullptr) > 0;
+}
+
+void GraphCycles::UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void** stack, int)) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr || n->priority >= priority) {
+ return;
+ }
+ n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
+ n->priority = priority;
+}
+
+int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr) {
+ *ptr = nullptr;
+ return 0;
+ } else {
+ *ptr = n->stack;
+ return n->nstack;
+ }
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
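
For contrast with the rank-bounded Pearce-Kelly search implemented above, here is a correctness-first sketch of the same InsertEdge() contract (reject any edge that would close a cycle), written with the std containers graphcycles.cc deliberately avoids. It runs a full DFS on every order-violating insert, so it illustrates the interface rather than the performance; with ranks maintained, the search would visit only nodes whose ranks fall in [rank[y], rank[x]]. TinyDag and its members are hypothetical:

// tiny_dag_sketch.cc - naive incremental cycle detection, for comparison.
#include <cstdint>
#include <unordered_set>
#include <vector>

class TinyDag {
 public:
  // Returns the id of a new node with no edges.
  int32_t AddNode() {
    out_.emplace_back();
    return static_cast<int32_t>(out_.size()) - 1;
  }

  // Returns false and leaves the graph unchanged if x->y would close a cycle.
  bool InsertEdge(int32_t x, int32_t y) {
    if (x == y) return false;         // self edge: a one-node cycle
    if (Reaches(y, x)) return false;  // y already reaches x, so x->y cycles
    out_[static_cast<size_t>(x)].insert(y);
    return true;
  }

 private:
  // Iterative DFS, mirroring the explicit stack used by ForwardDFS() above.
  bool Reaches(int32_t from, int32_t to) const {
    std::vector<int32_t> stack = {from};
    std::unordered_set<int32_t> seen;
    while (!stack.empty()) {
      int32_t n = stack.back();
      stack.pop_back();
      if (n == to) return true;
      if (!seen.insert(n).second) continue;  // already visited
      for (int32_t w : out_[static_cast<size_t>(n)]) stack.push_back(w);
    }
    return false;
  }

  std::vector<std::unordered_set<int32_t>> out_;  // adjacency sets
};

The real implementation gets the same answers while touching only the affected region, which is why it works well on the sparse lock-order graphs Mutex deadlock detection produces.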
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
index 14af15ce00..ceba33e4de 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/graphcycles.h
@@ -1,141 +1,141 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-
-// GraphCycles detects the introduction of a cycle into a directed
-// graph that is being built up incrementally.
-//
-// Nodes are identified by small integers. It is not possible to
-// record multiple edges with the same (source, destination) pair;
-// requests to add an edge where one already exists are silently
-// ignored.
-//
-// It is also not possible to introduce a cycle; an attempt to insert
-// an edge that would introduce a cycle fails and returns false.
-//
-// GraphCycles uses no internal locking; calls into it should be
-// serialized externally.
-
-// Performance considerations:
-// Works well on sparse graphs, poorly on dense graphs.
-// Extra information is maintained incrementally to detect cycles quickly.
-// InsertEdge() is very fast when the edge already exists, and reasonably fast
-// otherwise.
-// FindPath() is linear in the size of the graph.
-// The current implementation uses O(|V|+|E|) space.
-
-#include <cstdint>
-
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+
+// GraphCycles detects the introduction of a cycle into a directed
+// graph that is being built up incrementally.
+//
+// Nodes are identified by small integers. It is not possible to
+// record multiple edges with the same (source, destination) pair;
+// requests to add an edge where one already exists are silently
+// ignored.
+//
+// It is also not possible to introduce a cycle; an attempt to insert
+// an edge that would introduce a cycle fails and returns false.
+//
+// GraphCycles uses no internal locking; calls into it should be
+// serialized externally.
+
+// Performance considerations:
+// Works well on sparse graphs, poorly on dense graphs.
+// Extra information is maintained incrementally to detect cycles quickly.
+// InsertEdge() is very fast when the edge already exists, and reasonably fast
+// otherwise.
+// FindPath() is linear in the size of the graph.
+// The current implementation uses O(|V|+|E|) space.
+
+#include <cstdint>
+
#include "absl/base/config.h"
-namespace absl {
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Opaque identifier for a graph node.
-struct GraphId {
- uint64_t handle;
-
- bool operator==(const GraphId& x) const { return handle == x.handle; }
- bool operator!=(const GraphId& x) const { return handle != x.handle; }
-};
-
-// Return an invalid graph id that will never be assigned by GraphCycles.
-inline GraphId InvalidGraphId() {
- return GraphId{0};
-}
-
-class GraphCycles {
- public:
- GraphCycles();
- ~GraphCycles();
-
- // Return the id to use for ptr, assigning one if necessary.
- // Subsequent calls with the same ptr value will return the same id
- // until Remove().
- GraphId GetId(void* ptr);
-
- // Remove "ptr" from the graph. Its corresponding node and all
- // edges to and from it are removed.
- void RemoveNode(void* ptr);
-
- // Return the pointer associated with id, or nullptr if id is not
- // currently in the graph.
- void* Ptr(GraphId id);
-
- // Attempt to insert an edge from source_node to dest_node. If the
- // edge would introduce a cycle, return false without making any
- // changes. Otherwise add the edge and return true.
- bool InsertEdge(GraphId source_node, GraphId dest_node);
-
- // Remove any edge that exists from source_node to dest_node.
- void RemoveEdge(GraphId source_node, GraphId dest_node);
-
- // Return whether node exists in the graph.
- bool HasNode(GraphId node);
-
- // Return whether there is an edge directly from source_node to dest_node.
- bool HasEdge(GraphId source_node, GraphId dest_node) const;
-
- // Return whether dest_node is reachable from source_node
- // by following edges.
- bool IsReachable(GraphId source_node, GraphId dest_node) const;
-
- // Find a path from "source" to "dest". If such a path exists,
- // place the nodes on the path in the array path[], and return
- // the number of nodes on the path. If the path is longer than
- // max_path_len nodes, only the first max_path_len nodes are placed
- // in path[]. The client should compare the return value with
- // "max_path_len" to see when this occurs. If no path exists, return
- // 0. Any valid path stored in path[] will start with "source" and
- // end with "dest". There is no guarantee that the path is the
- // shortest, but no node will appear twice in the path, except the
- // source and destination node if they are identical; therefore, the
- // return value is at most one greater than the number of nodes in
- // the graph.
- int FindPath(GraphId source, GraphId dest, int max_path_len,
- GraphId path[]) const;
-
- // Update the stack trace recorded for id with the current stack
- // trace if the last time it was updated had a smaller priority
- // than the priority passed on this call.
- //
- // *get_stack_trace is called to get the stack trace.
- void UpdateStackTrace(GraphId id, int priority,
- int (*get_stack_trace)(void**, int));
-
- // Set *ptr to the beginning of the array that holds the recorded
- // stack trace for id and return the depth of the stack trace.
- int GetStackTrace(GraphId id, void*** ptr);
-
- // Check internal invariants. Crashes on failure, returns true on success.
- // Expensive: should only be called from graphcycles_test.cc.
- bool CheckInvariants() const;
-
- // ----------------------------------------------------
- struct Rep;
- private:
- Rep *rep_; // opaque representation
- GraphCycles(const GraphCycles&) = delete;
- GraphCycles& operator=(const GraphCycles&) = delete;
-};
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// Opaque identifier for a graph node.
+struct GraphId {
+ uint64_t handle;
+
+ bool operator==(const GraphId& x) const { return handle == x.handle; }
+ bool operator!=(const GraphId& x) const { return handle != x.handle; }
+};
+
+// Return an invalid graph id that will never be assigned by GraphCycles.
+inline GraphId InvalidGraphId() {
+ return GraphId{0};
+}
+
+class GraphCycles {
+ public:
+ GraphCycles();
+ ~GraphCycles();
+
+ // Return the id to use for ptr, assigning one if necessary.
+ // Subsequent calls with the same ptr value will return the same id
+ // until Remove().
+ GraphId GetId(void* ptr);
+
+ // Remove "ptr" from the graph. Its corresponding node and all
+ // edges to and from it are removed.
+ void RemoveNode(void* ptr);
+
+ // Return the pointer associated with id, or nullptr if id is not
+ // currently in the graph.
+ void* Ptr(GraphId id);
+
+ // Attempt to insert an edge from source_node to dest_node. If the
+ // edge would introduce a cycle, return false without making any
+ // changes. Otherwise add the edge and return true.
+ bool InsertEdge(GraphId source_node, GraphId dest_node);
+
+ // Remove any edge that exists from source_node to dest_node.
+ void RemoveEdge(GraphId source_node, GraphId dest_node);
+
+ // Return whether node exists in the graph.
+ bool HasNode(GraphId node);
+
+ // Return whether there is an edge directly from source_node to dest_node.
+ bool HasEdge(GraphId source_node, GraphId dest_node) const;
+
+ // Return whether dest_node is reachable from source_node
+ // by following edges.
+ bool IsReachable(GraphId source_node, GraphId dest_node) const;
+
+ // Find a path from "source" to "dest". If such a path exists,
+ // place the nodes on the path in the array path[], and return
+ // the number of nodes on the path. If the path is longer than
+ // max_path_len nodes, only the first max_path_len nodes are placed
+ // in path[]. The client should compare the return value with
+ // "max_path_len" to see when this occurs. If no path exists, return
+ // 0. Any valid path stored in path[] will start with "source" and
+ // end with "dest". There is no guarantee that the path is the
+ // shortest, but no node will appear twice in the path, except the
+ // source and destination node if they are identical; therefore, the
+ // return value is at most one greater than the number of nodes in
+ // the graph.
+ int FindPath(GraphId source, GraphId dest, int max_path_len,
+ GraphId path[]) const;
+
+ // Update the stack trace recorded for id with the current stack
+ // trace if the last time it was updated had a smaller priority
+ // than the priority passed on this call.
+ //
+ // *get_stack_trace is called to get the stack trace.
+ void UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void**, int));
+
+ // Set *ptr to the beginning of the array that holds the recorded
+ // stack trace for id and return the depth of the stack trace.
+ int GetStackTrace(GraphId id, void*** ptr);
+
+ // Check internal invariants. Crashes on failure, returns true on success.
+ // Expensive: should only be called from graphcycles_test.cc.
+ bool CheckInvariants() const;
+
+ // ----------------------------------------------------
+ struct Rep;
+ private:
+ Rep *rep_; // opaque representation
+ GraphCycles(const GraphCycles&) = delete;
+ GraphCycles& operator=(const GraphCycles&) = delete;
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif
+} // namespace absl
+
+#endif
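
The contract documented above is what a deadlock detector relies on: InsertEdge() refuses any edge that would close a cycle. A hypothetical caller, shown only to illustrate the declared API (the lock pointers "mu_a"/"mu_b" are placeholders, not names from this repository):

    // Hedged usage sketch of the GraphCycles API declared above.
    #include "absl/synchronization/internal/graphcycles.h"

    using absl::synchronization_internal::GraphCycles;
    using absl::synchronization_internal::GraphId;

    bool AcquisitionWouldDeadlock(GraphCycles* graph, void* mu_a, void* mu_b) {
      GraphId a = graph->GetId(mu_a);  // Assigns an id on first use.
      GraphId b = graph->GetId(mu_b);
      // InsertEdge() returns false, making no changes, if edge a -> b
      // would complete a cycle back to a.
      return !graph->InsertEdge(a, b);
    }
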
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
index 5714fdb05c..bbd4d2d70f 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/kernel_timeout.h
@@ -1,130 +1,130 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g., syscalls).
-// Constructible from an absl::Time (for a timeout to be respected) or {}
-// (for "no timeout").
-// This is a private low-level API for use by a handful of low-level
-// components that are friends of this class. Higher-level components
-// should build APIs based on absl::Time and absl::Duration.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-
-#include <time.h>
-
-#include <algorithm>
-#include <limits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// An optional absolute timeout, with nanosecond granularity,
+// compatible with absl::Time. Suitable for in-register
+// parameter-passing (e.g., syscalls).
+// Constructible from an absl::Time (for a timeout to be respected) or {}
+// (for "no timeout").
+// This is a private low-level API for use by a handful of low-level
+// components that are friends of this class. Higher-level components
+// should build APIs based on absl::Time and absl::Duration.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+
+#include <time.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-class Futex;
-class Waiter;
-
-class KernelTimeout {
- public:
- // A timeout that should expire at <t>. Any value, in the full
- // InfinitePast() to InfiniteFuture() range, is valid here and will be
- // respected.
- explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
- // No timeout.
- KernelTimeout() : ns_(0) {}
-
- // A more explicit factory for those who prefer it. Equivalent to {}.
- static KernelTimeout Never() { return {}; }
-
- // We explicitly do not support other custom formats: timespec, int64_t nanos.
- // Unify on this and absl::Time, please.
-
- bool has_timeout() const { return ns_ != 0; }
-
+namespace synchronization_internal {
+
+class Futex;
+class Waiter;
+
+class KernelTimeout {
+ public:
+ // A timeout that should expire at <t>. Any value, in the full
+ // InfinitePast() to InfiniteFuture() range, is valid here and will be
+ // respected.
+ explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
+ // No timeout.
+ KernelTimeout() : ns_(0) {}
+
+ // A more explicit factory for those who prefer it. Equivalent to {}.
+ static KernelTimeout Never() { return {}; }
+
+ // We explicitly do not support other custom formats: timespec, int64_t nanos.
+ // Unify on this and absl::Time, please.
+
+ bool has_timeout() const { return ns_ != 0; }
+
// Convert to parameter for sem_timedwait/futex/similar. Only for approved
// users. Do not call if !has_timeout.
struct timespec MakeAbsTimespec();
- private:
- // internal rep, not user visible: ns after unix epoch.
- // zero = no timeout.
- // Negative we treat as an unlikely (and certainly expired!) but valid
- // timeout.
- int64_t ns_;
-
- static int64_t MakeNs(absl::Time t) {
- // optimization--InfiniteFuture is a common "no timeout" value
- // and cheaper to compare than convert.
- if (t == absl::InfiniteFuture()) return 0;
- int64_t x = ToUnixNanos(t);
-
- // A timeout that lands exactly on the epoch (x=0) needs to be respected,
- // so we alter it unnoticeably to 1. Negative timeouts are in
- // theory supported, but handled poorly by the kernel (long
- // delays) so push them forward too; since all such times have
- // already passed, it's indistinguishable.
- if (x <= 0) x = 1;
- // A time larger than what can be represented to the kernel is treated
- // as no timeout.
- if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
- return x;
- }
-
-#ifdef _WIN32
- // Converts to milliseconds from now, or INFINITE when
- // !has_timeout(). For use by SleepConditionVariableSRW on
- // Windows. Callers should recognize that the return value is a
- // relative duration (it should be recomputed by calling this method
- // in the case of a spurious wakeup).
- // This header file may be included transitively by public header files,
- // so we define our own DWORD and INFINITE instead of getting them from
- // <intsafe.h> and <WinBase.h>.
- typedef unsigned long DWord; // NOLINT
- DWord InMillisecondsFromNow() const {
- constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
- if (!has_timeout()) {
- return kInfinite;
- }
- // The use of absl::Now() to convert from absolute time to
- // relative time means that absl::Now() cannot use anything that
- // depends on KernelTimeout (for example, Mutex) on Windows.
- int64_t now = ToUnixNanos(absl::Now());
- if (ns_ >= now) {
- // Round up so that Now() + ms_from_now >= ns_.
- constexpr uint64_t max_nanos =
- (std::numeric_limits<int64_t>::max)() - 999999u;
- uint64_t ms_from_now =
- (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
- }
- return 0;
- }
-#endif
-
- friend class Futex;
- friend class Waiter;
-};
-
+ private:
+ // internal rep, not user visible: ns after unix epoch.
+ // zero = no timeout.
+ // Negative we treat as an unlikely (and certainly expired!) but valid
+ // timeout.
+ int64_t ns_;
+
+ static int64_t MakeNs(absl::Time t) {
+ // optimization--InfiniteFuture is a common "no timeout" value
+ // and cheaper to compare than convert.
+ if (t == absl::InfiniteFuture()) return 0;
+ int64_t x = ToUnixNanos(t);
+
+ // A timeout that lands exactly on the epoch (x=0) needs to be respected,
+ // so we alter it unnoticeably to 1. Negative timeouts are in
+ // theory supported, but handled poorly by the kernel (long
+ // delays) so push them forward too; since all such times have
+ // already passed, it's indistinguishable.
+ if (x <= 0) x = 1;
+ // A time larger than what can be represented to the kernel is treated
+ // as no timeout.
+ if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
+ return x;
+ }
+
+#ifdef _WIN32
+ // Converts to milliseconds from now, or INFINITE when
+ // !has_timeout(). For use by SleepConditionVariableSRW on
+ // Windows. Callers should recognize that the return value is a
+ // relative duration (it should be recomputed by calling this method
+ // in the case of a spurious wakeup).
+ // This header file may be included transitively by public header files,
+ // so we define our own DWORD and INFINITE instead of getting them from
+ // <intsafe.h> and <WinBase.h>.
+ typedef unsigned long DWord; // NOLINT
+ DWord InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+ // The use of absl::Now() to convert from absolute time to
+ // relative time means that absl::Now() cannot use anything that
+ // depends on KernelTimeout (for example, Mutex) on Windows.
+ int64_t now = ToUnixNanos(absl::Now());
+ if (ns_ >= now) {
+ // Round up so that Now() + ms_from_now >= ns_.
+ constexpr uint64_t max_nanos =
+ (std::numeric_limits<int64_t>::max)() - 999999u;
+ uint64_t ms_from_now =
+ (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+ }
+ return 0;
+ }
+#endif
+
+ friend class Futex;
+ friend class Waiter;
+};
+
inline struct timespec KernelTimeout::MakeAbsTimespec() {
int64_t n = ns_;
static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
@@ -149,8 +149,8 @@ inline struct timespec KernelTimeout::MakeAbsTimespec() {
return abstime;
}
-} // namespace synchronization_internal
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
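
The Windows branch above rounds the remaining time up to whole milliseconds so that Now() + ms_from_now >= deadline. A hedged sketch of that arithmetic in isolation (the function and its inputs are illustrative; the constants mirror InMillisecondsFromNow()):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Ceiling conversion of a nanosecond deadline to milliseconds-from-now,
    // mirroring the member function above; deadline_ns/now_ns are example inputs.
    uint64_t MsFromNow(int64_t deadline_ns, int64_t now_ns) {
      if (deadline_ns < now_ns) return 0;  // Deadline already passed.
      constexpr uint64_t kMaxNanos =
          (std::numeric_limits<int64_t>::max)() - 999999u;
      uint64_t remaining = std::min<uint64_t>(
          kMaxNanos, static_cast<uint64_t>(deadline_ns - now_ns));
      return (remaining + 999999u) / 1000000u;  // Round up: +999999, then divide.
    }
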
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
index 16fc09ef86..a6031787e0 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc
@@ -1,106 +1,106 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/waiter.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/waiter.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
- base_internal::ThreadIdentity *identity;
- identity = GetOrCreateCurrentThreadIdentity();
- identity->blocked_count_ptr = counter;
-}
-
-std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
- base_internal::ThreadIdentity *identity;
- identity = GetOrCreateCurrentThreadIdentity();
- return identity->blocked_count_ptr;
-}
-
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
- new (Waiter::GetWaiter(identity)) Waiter();
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
- Waiter::GetWaiter(identity)->~Waiter();
-}
-
-void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
- const int ticker =
- identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
- // Wake up the waiting thread since it is time for it to become idle.
- Waiter::GetWaiter(identity)->Poke();
- }
-}
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ identity->blocked_count_ptr = counter;
+}
+
+std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ return identity->blocked_count_ptr;
+}
+
+void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
+ new (Waiter::GetWaiter(identity)) Waiter();
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+}
+
+void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
+ Waiter::GetWaiter(identity)->~Waiter();
+}
+
+void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
+ const int ticker =
+ identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
+ // Wake up the waiting thread since it is time for it to become idle.
+ Waiter::GetWaiter(identity)->Poke();
+ }
+}
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-extern "C" {
-
+} // namespace absl
+
+extern "C" {
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
- absl::base_internal::ThreadIdentity *identity) {
- absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
-}
-
+ absl::base_internal::ThreadIdentity *identity) {
+ absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
+}
+
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
- absl::synchronization_internal::KernelTimeout t) {
- bool timeout = false;
- absl::base_internal::ThreadIdentity *identity;
- identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
-
- // Ensure wait_start != 0.
- int ticker = identity->ticker.load(std::memory_order_relaxed);
- identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-
- if (identity->blocked_count_ptr != nullptr) {
- // Increment count of threads blocked in a given thread pool.
- identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
- }
-
- timeout =
- !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
-
- if (identity->blocked_count_ptr != nullptr) {
- identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
- }
-
- identity->is_idle.store(false, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- return !timeout;
-}
-
-} // extern "C"
-
-#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
+ absl::synchronization_internal::KernelTimeout t) {
+ bool timeout = false;
+ absl::base_internal::ThreadIdentity *identity;
+ identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+
+ // Ensure wait_start != 0.
+ int ticker = identity->ticker.load(std::memory_order_relaxed);
+ identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ // Increment count of threads blocked in a given thread pool.
+ identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ timeout =
+ !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ return !timeout;
+}
+
+} // extern "C"
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
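
Tick() above fires once per second and pokes a waiter whose wait has outlasted Waiter::kIdlePeriods ticks, letting it mark itself idle. A minimal sketch of that comparison (the standalone function and the threshold value are assumptions, not code from this file):

    #include <atomic>

    constexpr int kIdlePeriods = 60;  // Assumed threshold; one tick per second.

    // Mirrors the ticker/wait_start/is_idle check in PerThreadSem::Tick().
    bool ShouldPokeWaiter(std::atomic<int>& ticker, std::atomic<int>& wait_start,
                          std::atomic<bool>& is_idle) {
      const int t = ticker.fetch_add(1, std::memory_order_relaxed) + 1;
      const int w = wait_start.load(std::memory_order_relaxed);
      // w == 0 means the thread is not waiting; Wait() always stores nonzero.
      return w != 0 && (t - w > kIdlePeriods) &&
             !is_idle.load(std::memory_order_relaxed);
    }
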
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
index 25187fcb98..7beae8ef1d 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/per_thread_sem.h
@@ -1,115 +1,115 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// PerThreadSem is a low-level synchronization primitive controlling the
-// runnability of a single thread, used internally by Mutex and CondVar.
-//
-// This is NOT a general-purpose synchronization mechanism, and should not be
-// used directly by applications. Applications should use Mutex and CondVar.
-//
-// The semantics of PerThreadSem are the same as those of a counting semaphore.
-// Each thread maintains an abstract "count" value associated with its identity.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-
-#include <atomic>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/create_thread_identity.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// PerThreadSem is a low-level synchronization primitive controlling the
+// runnability of a single thread, used internally by Mutex and CondVar.
+//
+// This is NOT a general-purpose synchronization mechanism, and should not be
+// used directly by applications. Applications should use Mutex and CondVar.
+//
+// The semantics of PerThreadSem are the same as those of a counting semaphore.
+// Each thread maintains an abstract "count" value associated with its identity.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+
+#include <atomic>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-
-class Mutex;
-
-namespace synchronization_internal {
-
-class PerThreadSem {
- public:
- PerThreadSem() = delete;
- PerThreadSem(const PerThreadSem&) = delete;
- PerThreadSem& operator=(const PerThreadSem&) = delete;
-
- // Routine invoked periodically (once a second) by a background thread.
- // Has no effect on user-visible state.
- static void Tick(base_internal::ThreadIdentity* identity);
-
- // ---------------------------------------------------------------------------
- // Routines used by autosizing threadpools to detect when threads are
- // blocked. Each thread has a counter pointer, initially zero. If non-zero,
- // the implementation atomically increments the counter when it blocks on a
- // semaphore, and decrements it again when it wakes. This allows a threadpool
- // to keep track of how many of its threads are blocked.
- // SetThreadBlockedCounter() should be used only by threadpool
- // implementations. GetThreadBlockedCounter() should be used by modules that
- // block threads; if the pointer returned is non-zero, the location should be
- // incremented before the thread blocks, and decremented after it wakes.
- static void SetThreadBlockedCounter(std::atomic<int> *counter);
- static std::atomic<int> *GetThreadBlockedCounter();
-
- private:
- // Create the PerThreadSem associated with "identity". Initializes count=0.
- // REQUIRES: May only be called by ThreadIdentity.
- static void Init(base_internal::ThreadIdentity* identity);
-
- // Destroy the PerThreadSem associated with "identity".
- // REQUIRES: May only be called by ThreadIdentity.
- static void Destroy(base_internal::ThreadIdentity* identity);
-
- // Increments "identity"'s count.
- static inline void Post(base_internal::ThreadIdentity* identity);
-
- // Waits until either our count > 0 or t has expired.
- // If count > 0, decrements count and returns true. Otherwise returns false.
- // !t.has_timeout() => Wait(t) will return true.
- static inline bool Wait(KernelTimeout t);
-
+
+class Mutex;
+
+namespace synchronization_internal {
+
+class PerThreadSem {
+ public:
+ PerThreadSem() = delete;
+ PerThreadSem(const PerThreadSem&) = delete;
+ PerThreadSem& operator=(const PerThreadSem&) = delete;
+
+ // Routine invoked periodically (once a second) by a background thread.
+ // Has no effect on user-visible state.
+ static void Tick(base_internal::ThreadIdentity* identity);
+
+ // ---------------------------------------------------------------------------
+ // Routines used by autosizing threadpools to detect when threads are
+ // blocked. Each thread has a counter pointer, initially zero. If non-zero,
+ // the implementation atomically increments the counter when it blocks on a
+ // semaphore, and decrements it again when it wakes. This allows a threadpool
+ // to keep track of how many of its threads are blocked.
+ // SetThreadBlockedCounter() should be used only by threadpool
+ // implementations. GetThreadBlockedCounter() should be used by modules that
+ // block threads; if the pointer returned is non-zero, the location should be
+ // incremented before the thread blocks, and decremented after it wakes.
+ static void SetThreadBlockedCounter(std::atomic<int> *counter);
+ static std::atomic<int> *GetThreadBlockedCounter();
+
+ private:
+ // Create the PerThreadSem associated with "identity". Initializes count=0.
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Init(base_internal::ThreadIdentity* identity);
+
+ // Destroy the PerThreadSem associated with "identity".
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Destroy(base_internal::ThreadIdentity* identity);
+
+ // Increments "identity"'s count.
+ static inline void Post(base_internal::ThreadIdentity* identity);
+
+ // Waits until either our count > 0 or t has expired.
+ // If count > 0, decrements count and returns true. Otherwise returns false.
+ // !t.has_timeout() => Wait(t) will return true.
+ static inline bool Wait(KernelTimeout t);
+
// Permitted callers.
- friend class PerThreadSemTest;
- friend class absl::Mutex;
- friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
- friend void ReclaimThreadIdentity(void* v);
-};
-
-} // namespace synchronization_internal
+ friend class PerThreadSemTest;
+ friend class absl::Mutex;
+ friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
+ friend void ReclaimThreadIdentity(void* v);
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker. This causes it to flag weak symbol overrides as ODR
-// violations. Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
- absl::base_internal::ThreadIdentity* identity);
+ absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
- absl::synchronization_internal::KernelTimeout t);
-} // extern "C"
-
-void absl::synchronization_internal::PerThreadSem::Post(
- absl::base_internal::ThreadIdentity* identity) {
+ absl::synchronization_internal::KernelTimeout t);
+} // extern "C"
+
+void absl::synchronization_internal::PerThreadSem::Post(
+ absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
-}
-
-bool absl::synchronization_internal::PerThreadSem::Wait(
- absl::synchronization_internal::KernelTimeout t) {
+}
+
+bool absl::synchronization_internal::PerThreadSem::Wait(
+ absl::synchronization_internal::KernelTimeout t) {
return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
-}
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+}
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
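
The blocked-counter contract documented above is two-sided: the thread pool installs a counter, and any module that blocks a thread brackets the block with an increment/decrement. A hypothetical blocking module honoring that contract (ParkThread is a stand-in for the real wait primitive):

    #include <atomic>

    void ParkThread();  // Stand-in for whatever primitive actually blocks.

    // Hypothetical module following the documented counter protocol.
    void BlockPerContract(std::atomic<int>* counter) {
      // counter is what PerThreadSem::GetThreadBlockedCounter() returned;
      // a null pointer means no thread pool is watching this thread.
      if (counter != nullptr) counter->fetch_add(1, std::memory_order_relaxed);
      ParkThread();
      if (counter != nullptr) counter->fetch_sub(1, std::memory_order_relaxed);
    }
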
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
index 78447e001a..0cb96dacde 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/thread_pool.h
@@ -1,93 +1,93 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-
-#include <cassert>
-#include <cstddef>
-#include <functional>
-#include <queue>
-#include <thread> // NOLINT(build/c++11)
-#include <vector>
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <queue>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// A simple ThreadPool implementation for tests.
-class ThreadPool {
- public:
- explicit ThreadPool(int num_threads) {
- for (int i = 0; i < num_threads; ++i) {
- threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
- }
- }
-
- ThreadPool(const ThreadPool &) = delete;
- ThreadPool &operator=(const ThreadPool &) = delete;
-
- ~ThreadPool() {
- {
- absl::MutexLock l(&mu_);
- for (size_t i = 0; i < threads_.size(); i++) {
- queue_.push(nullptr); // Shutdown signal.
- }
- }
- for (auto &t : threads_) {
- t.join();
- }
- }
-
- // Schedule a function to be run on a ThreadPool thread immediately.
- void Schedule(std::function<void()> func) {
- assert(func != nullptr);
- absl::MutexLock l(&mu_);
- queue_.push(std::move(func));
- }
-
- private:
- bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- return !queue_.empty();
- }
-
- void WorkLoop() {
- while (true) {
- std::function<void()> func;
- {
- absl::MutexLock l(&mu_);
- mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
- func = std::move(queue_.front());
- queue_.pop();
- }
- if (func == nullptr) { // Shutdown signal.
- break;
- }
- func();
- }
- }
-
- absl::Mutex mu_;
- std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
- std::vector<std::thread> threads_;
-};
-
-} // namespace synchronization_internal
+namespace synchronization_internal {
+
+// A simple ThreadPool implementation for tests.
+class ThreadPool {
+ public:
+ explicit ThreadPool(int num_threads) {
+ for (int i = 0; i < num_threads; ++i) {
+ threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
+ }
+ }
+
+ ThreadPool(const ThreadPool &) = delete;
+ ThreadPool &operator=(const ThreadPool &) = delete;
+
+ ~ThreadPool() {
+ {
+ absl::MutexLock l(&mu_);
+ for (size_t i = 0; i < threads_.size(); i++) {
+ queue_.push(nullptr); // Shutdown signal.
+ }
+ }
+ for (auto &t : threads_) {
+ t.join();
+ }
+ }
+
+ // Schedule a function to be run on a ThreadPool thread immediately.
+ void Schedule(std::function<void()> func) {
+ assert(func != nullptr);
+ absl::MutexLock l(&mu_);
+ queue_.push(std::move(func));
+ }
+
+ private:
+ bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return !queue_.empty();
+ }
+
+ void WorkLoop() {
+ while (true) {
+ std::function<void()> func;
+ {
+ absl::MutexLock l(&mu_);
+ mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
+ func = std::move(queue_.front());
+ queue_.pop();
+ }
+ if (func == nullptr) { // Shutdown signal.
+ break;
+ }
+ func();
+ }
+ }
+
+ absl::Mutex mu_;
+ std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
+ std::vector<std::thread> threads_;
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
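
ThreadPool above is a fixed-size test helper: Schedule() enqueues work, workers block in Mutex::Await() until WorkAvailable(), and the destructor enqueues one nullptr per worker as a shutdown sentinel. A short usage sketch, assuming the internal header above is on the include path:

    #include <cstdio>

    #include "absl/synchronization/internal/thread_pool.h"

    int main() {
      absl::synchronization_internal::ThreadPool pool(4);
      for (int i = 0; i < 8; ++i) {
        pool.Schedule([i] { std::printf("task %d\n", i); });  // Runs on a worker.
      }
      return 0;  // ~ThreadPool pushes one nullptr per worker, then joins.
    }
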
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
index d68a525854..28ef311e4a 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.cc
@@ -1,317 +1,317 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-
-namespace absl {
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <new>
+#include <type_traits>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
- base_internal::ThreadIdentity *identity =
- base_internal::CurrentThreadIdentityIfPresent();
- assert(identity != nullptr);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- const int ticker = identity->ticker.load(std::memory_order_relaxed);
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
- identity->is_idle.store(true, std::memory_order_relaxed);
- }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
- // Loop until we can atomically decrement futex from a positive
- // value, waiting on a futex while we believe it is zero.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
-
- while (true) {
- int32_t x = futex_.load(std::memory_order_relaxed);
+namespace synchronization_internal {
+
+static void MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+
+Waiter::Waiter() {
+ futex_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
while (x != 0) {
- if (!futex_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- return true; // Consumed a wakeup, we are done.
- }
-
- if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
- if (err != 0) {
- if (err == -EINTR || err == -EWOULDBLOCK) {
- // Do nothing, the loop will retry.
- } else if (err == -ETIMEDOUT) {
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- if (futex_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- // Wake one thread waiting on the futex.
- const int err = Futex::Wake(&futex_, 1);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
- explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
- const int err = pthread_mutex_lock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
- }
- }
-
- PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
- PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
- ~PthreadMutexHolder() {
- const int err = pthread_mutex_unlock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
- }
- }
-
- private:
- pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
- const int err = pthread_mutex_init(&mu_, 0);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
- }
-
- const int err2 = pthread_cond_init(&cv_, 0);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
- }
-
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-Waiter::~Waiter() {
- const int err = pthread_mutex_destroy(&mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
- }
-
- const int err2 = pthread_cond_destroy(&cv_);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
- }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- PthreadMutexHolder h(&mu_);
- ++waiter_count_;
- // Loop until we find a wakeup to consume or we time out.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!t.has_timeout()) {
- const int err = pthread_cond_wait(&cv_, &mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
- }
- } else {
- const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
- if (err == ETIMEDOUT) {
- --waiter_count_;
- return false;
- }
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- PthreadMutexHolder h(&mu_);
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- PthreadMutexHolder h(&mu_);
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- const int err = pthread_cond_signal(&cv_);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
- }
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
- if (sem_init(&sem_, 0, 0) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
- }
- wakeups_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() {
- if (sem_destroy(&sem_) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
- }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- // Loop until we time out or consume a wakeup.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (true) {
- int x = wakeups_.load(std::memory_order_relaxed);
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = Futex::WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+
+Waiter::Waiter() {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+Waiter::~Waiter() {
+ const int err = pthread_mutex_destroy(&mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_destroy(&cv_);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or we time out.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+
+Waiter::Waiter() {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+ wakeups_.store(0, std::memory_order_relaxed);
+}
+
+Waiter::~Waiter() {
+ if (sem_destroy(&sem_) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
+ }
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ // Loop until we time out or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
while (x != 0) {
- if (!wakeups_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- // Successfully consumed a wakeup, we're done.
- return true;
- }
-
- if (!first_pass) MaybeBecomeIdle();
- // Nothing to consume, wait (looping on EINTR).
- while (true) {
- if (!t.has_timeout()) {
- if (sem_wait(&sem_) == 0) break;
- if (errno == EINTR) continue;
- ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
- } else {
- if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
- if (errno == EINTR) continue;
- if (errno == ETIMEDOUT) return false;
- ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- // Post a wakeup.
- if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
- ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
- static SRWLOCK *GetLock(Waiter *w) {
- return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
- }
-
- static CONDITION_VARIABLE *GetCond(Waiter *w) {
- return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
- }
-
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void Waiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
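
// An illustrative helper (not part of this diff) isolating the consume loop
// used by the semaphore-mode Wait() above. Wakeups live in a separate atomic
// so that Poke() can wake the semaphore spuriously without granting a permit,
// and Post() only needs one sem_post() on the 0 -> 1 transition: a single
// pending post unblocks one waiter, which then drains this counter.
#include <atomic>

// Returns true if a wakeup was claimed, false if none were available.
inline bool TryConsumeWakeup(std::atomic<int>& wakeups) {
  int x = wakeups.load(std::memory_order_relaxed);
  while (x != 0) {
    if (wakeups.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
      return true;  // Claimed one wakeup.
    }
    // compare_exchange_weak reloaded x on failure; retry.
  }
  return false;
}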
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+
+class Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
static_assert(sizeof(SRWLOCK) == sizeof(void *),
"`mu_storage_` does not have the same size as SRWLOCK");
static_assert(alignof(SRWLOCK) == alignof(void *),
@@ -320,109 +320,109 @@ class Waiter::WinHelper {
static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
"`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
"as `CONDITION_VARIABLE`");
- static_assert(
+ static_assert(
alignof(CONDITION_VARIABLE) == alignof(void *),
"`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
- // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
- // and destructible because we never call their constructors or destructors.
- static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
"The `SRWLOCK` type must be trivially constructible");
static_assert(
std::is_trivially_constructible<CONDITION_VARIABLE>::value,
"The `CONDITION_VARIABLE` type must be trivially constructible");
- static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
"The `SRWLOCK` type must be trivially destructible");
- static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
"The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
- explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
- AcquireSRWLockExclusive(mu_);
- }
-
- LockHolder(const LockHolder&) = delete;
- LockHolder& operator=(const LockHolder&) = delete;
-
- ~LockHolder() {
- ReleaseSRWLockExclusive(mu_);
- }
-
- private:
- SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
- auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
- auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
- InitializeSRWLock(mu);
- InitializeConditionVariable(cv);
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
- SRWLOCK *mu = WinHelper::GetLock(this);
- CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
- LockHolder h(mu);
- ++waiter_count_;
-
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
- if (err == ERROR_TIMEOUT) {
- --waiter_count_;
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- LockHolder h(WinHelper::GetLock(this));
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- LockHolder h(WinHelper::GetLock(this));
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- WakeConditionVariable(WinHelper::GetCond(this));
- }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-} // namespace synchronization_internal
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Waiter::Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+// SRW locks and condition variables do not need to be explicitly destroyed.
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+Waiter::~Waiter() = default;
+
+bool Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
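
// A generic sketch (not part of this diff) of the opaque-storage idiom that
// WinHelper wraps: keep a trivially destructible OS type inside an aligned
// byte buffer so headers never need <Windows.h>. `OpaqueBox` is hypothetical;
// strictly conforming C++17 code would access through std::launder.
#include <new>
#include <type_traits>

template <typename T>
class OpaqueBox {
  static_assert(std::is_trivially_destructible<T>::value,
                "OpaqueBox never runs ~T(), so T must be trivially "
                "destructible");

 public:
  OpaqueBox() { ::new (static_cast<void*>(storage_)) T(); }
  T* get() { return reinterpret_cast<T*>(storage_); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};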
+#else
+#error Unknown ABSL_WAITER_MODE
+#endif
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
+} // namespace absl
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
index 5d0ad76461..be3df180d4 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/waiter.h
@@ -1,155 +1,155 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#else
+#include <pthread.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/futex.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
-#define ABSL_WAITER_MODE_FUTEX 0
-#define ABSL_WAITER_MODE_SEM 1
-#define ABSL_WAITER_MODE_CONDVAR 2
-#define ABSL_WAITER_MODE_WIN32 3
-
-#if defined(ABSL_FORCE_WAITER_MODE)
-#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
+#define ABSL_WAITER_MODE_FUTEX 0
+#define ABSL_WAITER_MODE_SEM 1
+#define ABSL_WAITER_MODE_CONDVAR 2
+#define ABSL_WAITER_MODE_WIN32 3
+
+#if defined(ABSL_FORCE_WAITER_MODE)
+#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
+#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
-#endif
-
-namespace absl {
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
+#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
+#else
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#endif
+
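
// A build-configuration example (hedged; the exact invocation depends on the
// toolchain): the cascade above is overridden by defining
// ABSL_FORCE_WAITER_MODE to one of the indices, e.g. forcing the
// semaphore-based waiter on a POSIX build:
//
//   g++ -DABSL_FORCE_WAITER_MODE=1 -c waiter.cc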
+namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
- // Prepare any data to track waits.
- Waiter();
-
- // Not copyable or movable
- Waiter(const Waiter&) = delete;
- Waiter& operator=(const Waiter&) = delete;
-
- // Destroy any data to track waits.
- ~Waiter();
-
-  // Blocks the calling thread until a matching call to `Post()` is made or
-  // the deadline `t` has passed. Returns `true` if woken (`Post()` called),
-  // `false` on timeout.
- bool Wait(KernelTimeout t);
-
- // Restart the caller of `Wait()` as with a normal semaphore.
- void Post();
-
- // If anyone is waiting, wake them up temporarily and cause them to
- // call `MaybeBecomeIdle()`. They will then return to waiting for a
- // `Post()` or timeout.
- void Poke();
-
- // Returns the Waiter associated with the identity.
- static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
- static_assert(
- sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
- "Insufficient space for Waiter");
- return reinterpret_cast<Waiter*>(identity->waiter_state.data);
- }
-
- // How many periods to remain idle before releasing resources
+namespace synchronization_internal {
+
+// Waiter is an OS-specific semaphore.
+class Waiter {
+ public:
+ // Prepare any data to track waits.
+ Waiter();
+
+ // Not copyable or movable
+ Waiter(const Waiter&) = delete;
+ Waiter& operator=(const Waiter&) = delete;
+
+ // Destroy any data to track waits.
+ ~Waiter();
+
+  // Blocks the calling thread until a matching call to `Post()` is made or
+  // the deadline `t` has passed. Returns `true` if woken (`Post()` called),
+  // `false` on timeout.
+ bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ void Poke();
+
+ // Returns the Waiter associated with the identity.
+ static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<Waiter*>(identity->waiter_state.data);
+ }
+
+ // How many periods to remain idle before releasing resources
#ifndef ABSL_HAVE_THREAD_SANITIZER
static constexpr int kIdlePeriods = 60;
-#else
- // Memory consumption under ThreadSanitizer is a serious concern,
-  // so we release resources sooner. The value of 1 leads to a 1- to 2-second
-  // delay before marking a thread as idle.
- static const int kIdlePeriods = 1;
-#endif
-
- private:
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-  // Futexes are defined by specification to be 32 bits.
-  // Thus std::atomic<int32_t> must be just an int32_t with lock-free methods.
- std::atomic<int32_t> futex_;
- static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
- // REQUIRES: mu_ must be held.
- void InternalCondVarPoke();
-
- pthread_mutex_t mu_;
- pthread_cond_t cv_;
- int waiter_count_;
- int wakeup_count_; // Unclaimed wakeups.
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
- sem_t sem_;
- // This seems superfluous, but for Poke() we need to cause spurious
- // wakeups on the semaphore. Hence we can't actually use the
- // semaphore's count.
- std::atomic<int> wakeups_;
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
- // WinHelper - Used to define utilities for accessing the lock and
- // condition variable storage once the types are complete.
- class WinHelper;
-
- // REQUIRES: WinHelper::GetLock(this) must be held.
- void InternalCondVarPoke();
-
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+  // so we release resources sooner. The value of 1 leads to a 1- to 2-second
+  // delay before marking a thread as idle.
+ static const int kIdlePeriods = 1;
+#endif
+
+ private:
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+  // Futexes are defined by specification to be 32 bits.
+  // Thus std::atomic<int32_t> must be just an int32_t with lock-free methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+ sem_t sem_;
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
  // We can't include Windows.h in our headers, so we use aligned character
// buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
alignas(void*) unsigned char mu_storage_[sizeof(void*)];
alignas(void*) unsigned char cv_storage_[sizeof(void*)];
- int waiter_count_;
- int wakeup_count_;
-
-#else
- #error Unknown ABSL_WAITER_MODE
-#endif
-};
-
-} // namespace synchronization_internal
+ int waiter_count_;
+ int wakeup_count_;
+
+#else
+ #error Unknown ABSL_WAITER_MODE
+#endif
+};
+
+} // namespace synchronization_internal
ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
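
// A Linux-only sketch (not part of this diff) of the property the futex mode
// depends on: futex(2) compares exactly 32 bits at the given address, so the
// user-space word must be a lock-free 32-bit atomic, which the static_assert
// on `futex_` above enforces. The helper names are hypothetical, and real
// callers would also handle EINTR/EAGAIN as waiter.cc does.
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cstdint>

// Blocks while *word == expected; FUTEX_WAIT returns immediately otherwise.
long FutexWait(std::atomic<int32_t>* word, int32_t expected) {
  return syscall(SYS_futex, reinterpret_cast<int32_t*>(word),
                 FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
}

// Wakes at most one thread blocked in FutexWait() on `word`.
long FutexWakeOne(std::atomic<int32_t>* word) {
  return syscall(SYS_futex, reinterpret_cast<int32_t*>(word),
                 FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
}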
diff --git a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
index b4cbb122ab..40f72cf665 100644
--- a/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
+++ b/contrib/restricted/abseil-cpp/absl/synchronization/internal/ya.make
@@ -1,35 +1,35 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(g:cpp-contrib)
-
-LICENSE(Apache-2.0)
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/restricted/abseil-cpp/absl/base
- contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
- contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
- contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
- contrib/restricted/abseil-cpp/absl/base/log_severity
-)
-
-ADDINCL(
- GLOBAL contrib/restricted/abseil-cpp
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
+PEERDIR(
+ contrib/restricted/abseil-cpp/absl/base
+ contrib/restricted/abseil-cpp/absl/base/internal/low_level_alloc
+ contrib/restricted/abseil-cpp/absl/base/internal/raw_logging
+ contrib/restricted/abseil-cpp/absl/base/internal/spinlock_wait
+ contrib/restricted/abseil-cpp/absl/base/log_severity
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/abseil-cpp
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
CFLAGS(
-DNOMINMAX
)
-SRCS(
- graphcycles.cc
-)
-
-END()
+SRCS(
+ graphcycles.cc
+)
+
+END()
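
// Why the CFLAGS(-DNOMINMAX) above matters (an illustrative sketch, not part
// of the generated build file): without NOMINMAX, <Windows.h> defines min and
// max as function-like macros, which breaks ordinary uses of std::min and
// std::max. Defining it once, project-wide, via CFLAGS avoids ordering
// mistakes such as forgetting the define before the first include.
#ifdef _WIN32
#include <Windows.h>  // With NOMINMAX set by the build, no macros leak here.
#endif
#include <algorithm>

int ClampToNonNegative(int v) {
  return std::max(v, 0);  // Would fail to compile if max were still a macro.
}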