author:    heretic <heretic@yandex-team.ru>  2022-02-10 16:45:43 +0300
committer: Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:43 +0300
commit:    397cbe258b9e064f49c4ca575279f02f39fef76e (patch)
tree:      a0b0eb3cca6a14e4e8ea715393637672fa651284 /contrib/restricted/abseil-cpp-tstring/y_absl/synchronization
parent:    43f5a35593ebc9f6bcea619bb170394ea7ae468e (diff)
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/restricted/abseil-cpp-tstring/y_absl/synchronization')
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt            32
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt   68
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc         6
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h                         308
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc                    8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h                 58
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc                 4
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h                 10
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc                         2
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h                          8
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make                          12
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc                                  272
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h                                   176
-rw-r--r--  contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make                                   12
14 files changed, 488 insertions, 488 deletions
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt
index 7be6b42848..4335862815 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/.yandex_meta/licenses.list.txt
@@ -1,16 +1,16 @@
-====================Apache-2.0====================
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
-====================COPYRIGHT====================
-// Copyright 2017 The Abseil Authors.
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt
index b94f79902f..4ae3f0cbbc 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/.yandex_meta/licenses.list.txt
@@ -1,34 +1,34 @@
-====================Apache-2.0====================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-
-
-====================Apache-2.0====================
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
-====================COPYRIGHT====================
-// Copyright 2017 The Abseil Authors.
-
-
-====================COPYRIGHT====================
-// Copyright 2020 The Abseil Authors.
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+// Copyright 2017 The Abseil Authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2020 The Abseil Authors.
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
index 3c5764eeb9..b81c03ea03 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/create_thread_identity.cc
@@ -32,9 +32,9 @@ namespace synchronization_internal {
// ThreadIdentity storage is persistent, we maintain a free-list of previously
// released ThreadIdentity objects.
-ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
+ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage we cache them for re-use.
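Reviewer note: the hunk above restores a spin-lock-guarded free-list that caches released ThreadIdentity storage for reuse. A minimal standalone sketch of the same cache-on-release pattern, with a generic node type and std::mutex standing in for SpinLock (every name here is illustrative, not part of the library):

#include <mutex>

struct Node {
  Node* next = nullptr;
  // ... payload whose storage must remain valid for the process lifetime ...
};

static std::mutex g_freelist_mu;
static Node* g_freelist = nullptr;  // singly linked list of released nodes

// Called at thread exit: push the node so its storage can be reused.
void ReleaseNode(Node* n) {
  std::lock_guard<std::mutex> lock(g_freelist_mu);
  n->next = g_freelist;
  g_freelist = n;
}

// Called at thread creation: pop a cached node if available, else allocate.
Node* AcquireNode() {
  {
    std::lock_guard<std::mutex> lock(g_freelist_mu);
    if (g_freelist != nullptr) {
      Node* n = g_freelist;
      g_freelist = n->next;
      return n;
    }
  }
  return new Node();
}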
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
index aea769d8ec..99e06d0d38 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/futex.h
@@ -1,154 +1,154 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-
-#include "y_absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cstdint>
-
-#include "y_absl/base/optimization.h"
-#include "y_absl/synchronization/internal/kernel_timeout.h"
-
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
-#elif defined(__BIONIC__)
-// Bionic supports all the futex operations we need even when some of the futex
-// definitions are missing.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
-// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-
-namespace y_absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_WAIT_BITSET
-#define FUTEX_WAIT_BITSET 9
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#ifndef FUTEX_CLOCK_REALTIME
-#define FUTEX_CLOCK_REALTIME 256
-#endif
-#ifndef FUTEX_BITSET_MATCH_ANY
-#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
-#endif
-#endif
-
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
-class FutexImpl {
- public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- int err = 0;
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (ABSL_PREDICT_FALSE(err != 0)) {
- err = -errno;
- }
- return err;
- }
-
- static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
- int32_t bits,
- const struct timespec *abstime) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- err = -errno;
- }
- return err;
- }
-
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- err = -errno;
- }
- return err;
- }
-
- // FUTEX_WAKE_BITSET
- static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- err = -errno;
- }
- return err;
- }
-};
-
-class Futex : public FutexImpl {};
-
-} // namespace synchronization_internal
-ABSL_NAMESPACE_END
-} // namespace y_absl
-
-#endif // ABSL_INTERNAL_HAVE_FUTEX
-
-#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+
+#include "y_absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "y_absl/base/optimization.h"
+#include "y_absl/synchronization/internal/kernel_timeout.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
+#elif defined(__BIONIC__)
+// Bionic supports all the futex operations we need even when some of the futex
+// definitions are missing.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
+// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace y_absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+class FutexImpl {
+ public:
+ static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ KernelTimeout t) {
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ }
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
+ int32_t bits,
+ const struct timespec *abstime) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int Wake(std::atomic<int32_t> *v, int32_t count) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ // FUTEX_WAKE_BITSET
+ static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+};
+
+class Futex : public FutexImpl {};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace y_absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
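Reviewer note: for readers new to the syscall this header wraps, here is a minimal Linux-only wait/wake sketch in the style of FutexImpl above. do_wait and do_wake are illustrative names; the sketch assumes glibc's syscall(2) and the <linux/futex.h> constants:

#include <atomic>
#include <cerrno>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// Sleep only while *v still equals `expected`; the kernel checks atomically,
// so a wake racing between our load and the syscall cannot be lost.
int do_wait(std::atomic<int32_t>* v, int32_t expected) {
  int err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                    FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
  return err != 0 ? -errno : 0;
}

// Wake up to `count` threads blocked in do_wait on the same word.
int do_wake(std::atomic<int32_t>* v, int32_t count) {
  int woken = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
  return woken < 0 ? -errno : woken;
}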
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
index d8987c1a98..8f922299a0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/graphcycles.cc
@@ -37,7 +37,7 @@
#include <algorithm>
#include <array>
-#include <limits>
+#include <limits>
#include "y_absl/base/internal/hide_ptr.h"
#include "y_absl/base/internal/raw_logging.h"
#include "y_absl/base/internal/spinlock.h"
@@ -52,9 +52,9 @@ namespace {
// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
// which people are doing things like acquiring Mutexes.
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock arena_mu(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock arena_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
static void InitArenaIfNecessary() {
arena_mu.Lock();
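Reviewer note: InitArenaIfNecessary above is lock-guarded lazy initialization; a dedicated arena is used because LowLevelAlloc's default arena calls malloc hooks, which may themselves acquire Mutexes. A generic standalone sketch of the same pattern (the int payload is a placeholder for the real arena):

#include <mutex>

static std::mutex g_init_mu;
static int* g_resource = nullptr;  // placeholder for the lazily built arena

void InitResourceIfNecessary() {
  std::lock_guard<std::mutex> lock(g_init_mu);
  if (g_resource == nullptr) {
    g_resource = new int(0);  // real code would construct the arena here
  }
}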
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
index ee4078702d..70efeaf72d 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/kernel_timeout.h
@@ -26,7 +26,7 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#include <time.h>
-
+
#include <algorithm>
#include <limits>
@@ -58,10 +58,10 @@ class KernelTimeout {
bool has_timeout() const { return ns_ != 0; }
- // Convert to parameter for sem_timedwait/futex/similar. Only for approved
- // users. Do not call if !has_timeout.
- struct timespec MakeAbsTimespec();
-
+ // Convert to parameter for sem_timedwait/futex/similar. Only for approved
+ // users. Do not call if !has_timeout.
+ struct timespec MakeAbsTimespec();
+
private:
// internal rep, not user visible: ns after unix epoch.
// zero = no timeout.
@@ -125,30 +125,30 @@ class KernelTimeout {
friend class Waiter;
};
-inline struct timespec KernelTimeout::MakeAbsTimespec() {
- int64_t n = ns_;
- static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
- if (n == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timespec from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- n = (std::numeric_limits<int64_t>::max)();
- }
-
- // Kernel APIs validate timespecs as being at or after the epoch,
- // despite the kernel time type being signed. However, no one can
- // tell the difference between a timeout at or before the epoch (since
- // all such timeouts have expired!)
- if (n < 0) n = 0;
-
- struct timespec abstime;
- int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
- abstime.tv_sec = static_cast<time_t>(seconds);
- abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
- return abstime;
-}
-
+inline struct timespec KernelTimeout::MakeAbsTimespec() {
+ int64_t n = ns_;
+ static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+ if (n == 0) {
+ ABSL_RAW_LOG(
+ ERROR, "Tried to create a timespec from a non-timeout; never do this.");
+ // But we'll try to continue sanely. no-timeout ~= saturated timeout.
+ n = (std::numeric_limits<int64_t>::max)();
+ }
+
+ // Kernel APIs validate timespecs as being at or after the epoch,
+ // despite the kernel time type being signed. However, no one can
+ // tell the difference between a timeout at or before the epoch (since
+ // all such timeouts have expired!)
+ if (n < 0) n = 0;
+
+ struct timespec abstime;
+ int64_t seconds = (std::min)(n / kNanosPerSecond,
+ int64_t{(std::numeric_limits<time_t>::max)()});
+ abstime.tv_sec = static_cast<time_t>(seconds);
+ abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+ return abstime;
+}
+
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace y_absl
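Reviewer note: MakeAbsTimespec above clamps negative values to the epoch (such timeouts have already expired) and saturates the seconds field at time_t's maximum, so the kernel never sees an invalid or overflowed timespec. A standalone equivalent of the conversion (hypothetical helper name, same arithmetic as the inline definition restored above):

#include <algorithm>
#include <cstdint>
#include <ctime>
#include <limits>

timespec NanosToAbsTimespec(int64_t ns) {
  constexpr int64_t kNanosPerSecond = 1000 * 1000 * 1000;
  if (ns < 0) ns = 0;  // at/before the epoch == already expired
  const int64_t seconds =
      (std::min)(ns / kNanosPerSecond,
                 int64_t{(std::numeric_limits<time_t>::max)()});
  timespec abstime;
  abstime.tv_sec = static_cast<time_t>(seconds);
  abstime.tv_nsec =
      static_cast<decltype(abstime.tv_nsec)>(ns % kNanosPerSecond);
  return abstime;
}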
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
index 3e40e812a9..245bd110d0 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.cc
@@ -68,12 +68,12 @@ ABSL_NAMESPACE_END
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
y_absl::base_internal::ThreadIdentity *identity) {
y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
-ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
y_absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
y_absl::base_internal::ThreadIdentity *identity;
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
index 2fc39ca130..bd1bac4140 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/per_thread_sem.h
@@ -78,7 +78,7 @@ class PerThreadSem {
// !t.has_timeout() => Wait(t) will return true.
static inline bool Wait(KernelTimeout t);
- // Permitted callers.
+ // Permitted callers.
friend class PerThreadSemTest;
friend class y_absl::Mutex;
friend y_absl::base_internal::ThreadIdentity* CreateThreadIdentity();
@@ -96,20 +96,20 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
y_absl::base_internal::ThreadIdentity* identity);
-bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
+bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
y_absl::synchronization_internal::KernelTimeout t);
} // extern "C"
void y_absl::synchronization_internal::PerThreadSem::Post(
y_absl::base_internal::ThreadIdentity* identity) {
- ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
}
bool y_absl::synchronization_internal::PerThreadSem::Wait(
y_absl::synchronization_internal::KernelTimeout t) {
- return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
+ return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
}
#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
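Reviewer note: the extern "C" declarations above are weak extension points (see the ABSL_ATTRIBUTE_WEAK definitions restored in per_thread_sem.cc); a program can interpose by supplying a strong definition of the same symbol. A sketch of such an override, where only the instrumentation comment is new and the body otherwise mirrors the default definition:

#include "y_absl/base/internal/thread_identity.h"
#include "y_absl/synchronization/internal/per_thread_sem.h"
#include "y_absl/synchronization/internal/waiter.h"

extern "C" void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
    y_absl::base_internal::ThreadIdentity* identity) {
  // ... custom instrumentation (e.g. a counter bump) could run here ...
  y_absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}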
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
index 573ce9c6c5..d89c3e9ab1 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.cc
@@ -48,7 +48,7 @@
#include "y_absl/base/optimization.h"
#include "y_absl/synchronization/internal/kernel_timeout.h"
-
+
namespace y_absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
index e6fede411f..611f9d25f5 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/waiter.h
@@ -36,7 +36,7 @@
#include <cstdint>
#include "y_absl/base/internal/thread_identity.h"
-#include "y_absl/synchronization/internal/futex.h"
+#include "y_absl/synchronization/internal/futex.h"
#include "y_absl/synchronization/internal/kernel_timeout.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
@@ -49,7 +49,7 @@
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
#elif defined(ABSL_HAVE_SEMAPHORE_H)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
@@ -96,8 +96,8 @@ class Waiter {
}
// How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
- static constexpr int kIdlePeriods = 60;
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
#else
// Memory consumption under ThreadSanitizer is a serious concern,
// so we release resources sooner. The value of 1 leads to 1 to 2 second
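Reviewer note: a small compile-time probe of which waiter backend the selection chain above resolved to; the mode macros referenced are the ones this header defines, and #pragma message is supported by GCC, Clang, and MSVC (a sketch, not shipped code):

#include "y_absl/synchronization/internal/waiter.h"

#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
#pragma message("Waiter backend: Linux futex")
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
#pragma message("Waiter backend: POSIX semaphore")
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
#pragma message("Waiter backend: Win32 native")
#else
#pragma message("Waiter backend: fallback (e.g. condition variable)")
#endif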
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make
index 167e710a78..6769fbcbd2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/internal/ya.make
@@ -2,15 +2,15 @@
LIBRARY()
-OWNER(
- somov
- g:cpp-contrib
-)
+OWNER(
+ somov
+ g:cpp-contrib
+)
LICENSE(Apache-2.0)
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
PEERDIR(
contrib/restricted/abseil-cpp-tstring/y_absl/base
contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
index babe5c4ada..75f4bbfae2 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.cc
@@ -39,7 +39,7 @@
#include <thread> // NOLINT(build/c++11)
#include "y_absl/base/attributes.h"
-#include "y_absl/base/call_once.h"
+#include "y_absl/base/call_once.h"
#include "y_absl/base/config.h"
#include "y_absl/base/dynamic_annotations.h"
#include "y_absl/base/internal/atomic_hook.h"
@@ -50,7 +50,7 @@
#include "y_absl/base/internal/spinlock.h"
#include "y_absl/base/internal/sysinfo.h"
#include "y_absl/base/internal/thread_identity.h"
-#include "y_absl/base/internal/tsan_mutex_interface.h"
+#include "y_absl/base/internal/tsan_mutex_interface.h"
#include "y_absl/base/port.h"
#include "y_absl/debugging/stacktrace.h"
#include "y_absl/debugging/symbolize.h"
@@ -60,7 +60,7 @@
using y_absl::base_internal::CurrentThreadIdentityIfPresent;
using y_absl::base_internal::PerThreadSynch;
-using y_absl::base_internal::SchedulingGuard;
+using y_absl::base_internal::SchedulingGuard;
using y_absl::base_internal::ThreadIdentity;
using y_absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using y_absl::synchronization_internal::GraphCycles;
@@ -70,9 +70,9 @@ using y_absl::synchronization_internal::KernelTimeout;
using y_absl::synchronization_internal::PerThreadSem;
extern "C" {
-ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
- std::this_thread::yield();
-}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+ std::this_thread::yield();
+}
} // extern "C"
namespace y_absl {
@@ -80,7 +80,7 @@ ABSL_NAMESPACE_BEGIN
namespace {
-#if defined(ABSL_HAVE_THREAD_SANITIZER)
+#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
@@ -91,8 +91,8 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
- submit_profile_data;
+y_absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES y_absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)>
mutex_tracer;
@@ -127,63 +127,63 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
}
namespace {
-// Represents the strategy for spin and yield.
-// See the comment in GetMutexGlobals() for more information.
-enum DelayMode { AGGRESSIVE, GENTLE };
-
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
- y_absl::once_flag once;
- int spinloop_iterations = 0;
- int32_t mutex_sleep_limit[2] = {};
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
+
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+ y_absl::once_flag once;
+ int spinloop_iterations = 0;
+ int32_t mutex_sleep_limit[2] = {};
};
-
-const MutexGlobals &GetMutexGlobals() {
- ABSL_CONST_INIT static MutexGlobals data;
- y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = y_absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
- data.mutex_sleep_limit[AGGRESSIVE] = 5000;
- data.mutex_sleep_limit[GENTLE] = 250;
- } else {
- data.mutex_sleep_limit[AGGRESSIVE] = 0;
- data.mutex_sleep_limit[GENTLE] = 0;
- }
- });
- return data;
-}
-} // namespace
-
-namespace synchronization_internal {
-// Returns the Mutex delay on iteration `c` depending on the given `mode`.
-// The returned value should be used as `c` for the next call to `MutexDelay`.
-int MutexDelay(int32_t c, int mode) {
- const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+
+const MutexGlobals &GetMutexGlobals() {
+ ABSL_CONST_INIT static MutexGlobals data;
+ y_absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+ const int num_cpus = y_absl::base_internal::NumCPUs();
+ data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+ // for any thread to make progress gets it without undue delay.
+ if (num_cpus > 1) {
+ data.mutex_sleep_limit[AGGRESSIVE] = 5000;
+ data.mutex_sleep_limit[GENTLE] = 250;
+ } else {
+ data.mutex_sleep_limit[AGGRESSIVE] = 0;
+ data.mutex_sleep_limit[GENTLE] = 0;
+ }
+ });
+ return data;
+}
+} // namespace
+
+namespace synchronization_internal {
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
+int MutexDelay(int32_t c, int mode) {
+ const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
- // Spin.
- c++;
+ // Spin.
+ c++;
} else {
- SchedulingGuard::ScopedEnable enable_rescheduling;
+ SchedulingGuard::ScopedEnable enable_rescheduling;
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) {
- // Yield once.
- ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+ if (c == limit) {
+ // Yield once.
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
- } else {
- // Then wait.
+ } else {
+ // Then wait.
y_absl::SleepFor(y_absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return c;
+ return c;
}
-} // namespace synchronization_internal
+} // namespace synchronization_internal
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
@@ -221,12 +221,12 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
//------------------------------------------------------------------
// Data for doing deadlock detection.
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock deadlock_graph_mu(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock deadlock_graph_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
- ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
+// Graph used to detect deadlocks.
+ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
// An event mechanism for debugging mutex use.
@@ -287,12 +287,12 @@ static const struct {
{0, "SignalAll on "},
};
-ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
- y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static y_absl::base_internal::SpinLock synch_event_mu(
+ y_absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
-static constexpr uint32_t kNSynchEvent = 1031;
+static constexpr uint32_t kNSynchEvent = 1031;
static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
@@ -312,7 +312,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
+ char name[1]; // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
@@ -503,7 +503,7 @@ struct SynchWaitParams {
std::atomic<intptr_t> *cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
- // to contend for the mutex.
+ // to contend for the mutex.
};
struct SynchLocksHeld {
@@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -717,7 +717,7 @@ static constexpr bool kDebugMode = false;
static constexpr bool kDebugMode = true;
#endif
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
static unsigned TsanFlags(Mutex::MuHow how) {
return how == kShared ? __tsan_mutex_read_lock : 0;
}
@@ -763,13 +763,13 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
-// Return true iff threads x and y are part of the same equivalence
-// class of waiters. An equivalence class is defined as the set of
-// waiters with the same condition, type of lock, and thread priority.
-//
-// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
- return x->waitp->how == y->waitp->how && x->priority == y->priority &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
@@ -788,19 +788,19 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// - invalid (iff x is not in a Mutex wait queue),
// - null, or
// - a pointer to a distinct thread waiting later in the same Mutex queue
-// such that all threads in [x, x->skip] have the same condition, priority
-// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
-// x->skip]).
+// such that all threads in [x, x->skip] have the same condition, priority
+// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+// x->skip]).
// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
//
-// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
// first runnable thread y from the front a Mutex queue to adjust the skip
// field of another thread x because if x->skip==y, x->skip must (have) become
// invalid before y is removed. The function TryRemove can remove a specified
// thread from an arbitrary position in the queue whether runnable or not, so
// it fixes up skip fields that would otherwise be left dangling.
// The statement
-// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
+// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
// maintains the invariant provided x is not the last waiter in a Mutex queue
// The statement
// if (x->skip != null) { x->skip = x->skip->skip; }
@@ -934,17 +934,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
- // No unlocker can be scanning the queue, so we can insert into the
- // middle of the queue.
- //
- // Within a skip chain, all waiters have the same priority, so we can
- // skip forward through the chains until we find one with a lower
- // priority than the waiter to be enqueued.
+ // No unlocker can be scanning the queue, so we can insert into the
+ // middle of the queue.
+ //
+ // Within a skip chain, all waiters have the same priority, so we can
+ // skip forward through the chains until we find one with a lower
+ // priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
- // (side-effect: optimizes skip chain)
- advance_to = Skip(enqueue_after->next);
+ // (side-effect: optimizes skip chain)
+ advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
// termination guaranteed because s->priority > head->priority
// and head is the end of a skip chain
@@ -963,21 +963,21 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// enqueue_after can be: head, Skip(...), or cur.
// The first two imply enqueue_after->skip == nullptr, and
- // the last is used only if MuEquivalentWaiter(s, cur).
+ // the last is used only if MuEquivalentWaiter(s, cur).
// We require this because clearing enqueue_after->skip
// is impossible; enqueue_after's predecessors might also
// incorrectly skip over s if we were to allow other
// insertion points.
- ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
- MuEquivalentWaiter(enqueue_after, s),
- "Mutex Enqueue failure");
+ ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+ MuEquivalentWaiter(enqueue_after, s),
+ "Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
- MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
+ MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
// enqueue_after can skip to its new successor, s
enqueue_after->skip = enqueue_after->next;
}
- if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
} else { // enqueue not done any other way, so
@@ -987,7 +987,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
- if (head->may_skip && MuEquivalentWaiter(head, s)) {
+ if (head->may_skip && MuEquivalentWaiter(head, s)) {
// head now has successor; may skip
head->skip = s;
}
@@ -1007,7 +1007,7 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
pw->next = w->next; // snip w out of list
if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
- } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
+ } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successors skip target
@@ -1064,7 +1064,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1077,13 +1077,13 @@ void Mutex::TryRemove(PerThreadSynch *s) {
PerThreadSynch *w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
- // If the current element isn't equivalent to the waiter to be
- // removed, we can skip the entire chain.
- if (!MuEquivalentWaiter(s, w)) {
+ // If the current element isn't equivalent to the waiter to be
+ // removed, we can skip the entire chain.
+ if (!MuEquivalentWaiter(s, w)) {
pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
- // because they are in a different equivalence class.
+ // because they are in a different equivalence class.
} else { // seeking same condition
FixSkip(w, s); // fix up any skip pointer from w to s
pw = w;
@@ -1131,7 +1131,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
this->TryRemove(s);
int c = 0;
while (s->next != nullptr) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
this->TryRemove(s);
}
if (kDebugMode) {
@@ -1374,9 +1374,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
len += static_cast<int>(strlen(&b->buf[len]));
}
}
- ABSL_RAW_LOG(ERROR,
- "Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
- "historical lock ordering graph has been observed",
+ ABSL_RAW_LOG(ERROR,
+ "Acquiring y_absl::Mutex %p while holding %s; a cycle in the "
+ "historical lock ordering graph has been observed",
static_cast<void *>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
@@ -1452,19 +1452,19 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = GetMutexGlobals().spinloop_iterations;
+ int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader|kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- return true;
+ return true;
}
- } while (--c > 0);
- return false;
+ } while (--c > 0);
+ return false;
}
ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
@@ -1763,8 +1763,8 @@ static const intptr_t ignore_waiting_writers[] = {
};
// Internal version of LockWhen(). See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
- int flags) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
"condition untrue on return from LockSlow");
@@ -1779,7 +1779,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// All memory accesses are ignored inside of mutex operations + for unlock
// operation tsan considers that we've already released the mutex.
bool res = false;
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
const int flags = read_lock ? __tsan_mutex_read_lock : 0;
const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
@@ -1829,9 +1829,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// So we "divert" (which un-ignores both memory accesses and synchronization)
// and then separately turn on ignores of memory accesses.
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
- ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
return res;
@@ -1912,7 +1912,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2014,8 +2014,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
- // delay, then try again
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // delay, then try again
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -2032,8 +2032,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2150,7 +2150,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
!old_h->may_skip) { // we used old_h as a terminator
old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
- if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
}
}
@@ -2310,8 +2310,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.store(nv, std::memory_order_release);
break; // out of for(;;)-loop
}
- // aggressive here; no one can proceed till we do
- c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
+ // aggressive here; no one can proceed till we do
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
@@ -2323,8 +2323,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (!cond_waiter) {
// Sample lock contention events only if the (first) waiter was trying to
// acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles =
- base_internal::CycleClock::Now() - enqueue_timestamp;
+ int64_t wait_cycles =
+ base_internal::CycleClock::Now() - enqueue_timestamp;
mutex_tracer("slow release", this, wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(enqueue_timestamp);
@@ -2351,7 +2351,7 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2401,7 +2401,7 @@ void Mutex::Fer(PerThreadSynch *w) {
return;
}
}
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
@@ -2450,7 +2450,7 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2479,8 +2479,8 @@ void CondVar::Remove(PerThreadSynch *s) {
std::memory_order_release);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
}
@@ -2513,7 +2513,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
!cv_word->compare_exchange_weak(v, v | kCvSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
@@ -2612,7 +2612,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
- SchedulingGuard::ScopedDisable disable_rescheduling;
+ SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2645,7 +2645,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2682,8 +2682,8 @@ void CondVar::SignalAll () {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
return;
} else {
- // try again after a delay
- c = synchronization_internal::MutexDelay(c, GENTLE);
+ // try again after a delay
+ c = synchronization_internal::MutexDelay(c, GENTLE);
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2696,7 +2696,7 @@ void ReleasableMutexLock::Release() {
this->mu_ = nullptr;
}
-#ifdef ABSL_HAVE_THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
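Reviewer note: MutexDelay, restored verbatim by this hunk, is a three-phase backoff: spin while the iteration count is below the mode's limit, yield exactly once at the limit, then sleep 10 microseconds per iteration and reset. A self-contained sketch of that control flow using only the standard library (constants and names illustrative):

#include <chrono>
#include <cstdint>
#include <thread>

// Returns the counter to pass to the next call, as MutexDelay does with `c`.
int32_t BackoffStep(int32_t c, int32_t limit) {
  if (c < limit) {
    return c + 1;  // spin: cheapest option while contention is short
  }
  if (c == limit) {
    std::this_thread::yield();  // yield the CPU exactly once at the boundary
    return c + 1;
  }
  std::this_thread::sleep_for(std::chrono::microseconds(10));  // then sleep
  return 0;  // reset so the spin/yield/sleep cycle repeats
}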
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
index 0762a852df..f8661ad8b9 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/mutex.h
@@ -31,23 +31,23 @@
//
// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
// write access within the current scope.
-//
+//
// ReaderMutexLock
// - An RAII wrapper to acquire and release a `Mutex` for shared/read
// access within the current scope.
//
// WriterMutexLock
-// - Effectively an alias for `MutexLock` above, designed for use in
-// distinguishing reader and writer locks within code.
+// - Effectively an alias for `MutexLock` above, designed for use in
+// distinguishing reader and writer locks within code.
//
// In addition to simple mutex locks, this file also defines ways to perform
// locking under certain conditions.
//
-// Condition - (Preferred) Used to wait for a particular predicate that
-// depends on state protected by the `Mutex` to become true.
-// CondVar - A lower-level variant of `Condition` that relies on
-// application code to explicitly signal the `CondVar` when
-// a condition has been met.
+// Condition - (Preferred) Used to wait for a particular predicate that
+// depends on state protected by the `Mutex` to become true.
+// CondVar - A lower-level variant of `Condition` that relies on
+// application code to explicitly signal the `CondVar` when
+// a condition has been met.
//
// See below for more information on using `Condition` or `CondVar`.
//
@@ -147,7 +147,7 @@ class ABSL_LOCKABLE Mutex {
//
// Example usage:
// namespace foo {
- // ABSL_CONST_INIT y_absl::Mutex mu(y_absl::kConstInit);
+ // ABSL_CONST_INIT y_absl::Mutex mu(y_absl::kConstInit);
// }
explicit constexpr Mutex(y_absl::ConstInitType);
@@ -162,7 +162,7 @@ class ABSL_LOCKABLE Mutex {
// Mutex::Unlock()
//
// Releases this `Mutex` and returns it from the exclusive/write state to the
- // free state. Calling thread must hold the `Mutex` exclusively.
+ // free state. Calling thread must hold the `Mutex` exclusively.
void Unlock() ABSL_UNLOCK_FUNCTION();
// Mutex::TryLock()
@@ -323,16 +323,16 @@ class ABSL_LOCKABLE Mutex {
// Mutex::AwaitWithTimeout()
// Mutex::AwaitWithDeadline()
//
- // Unlocks this `Mutex` and blocks until simultaneously:
+ // Unlocks this `Mutex` and blocks until simultaneously:
// - either `cond` is true or the {timeout has expired, deadline has passed}
// and
// - this `Mutex` can be reacquired,
// then reacquire this `Mutex` in the same mode in which it was previously
// held, returning `true` iff `cond` is `true` on return.
//
- // If the condition is initially `true`, the implementation *may* skip the
- // release/re-acquire step and return immediately.
- //
+ // If the condition is initially `true`, the implementation *may* skip the
+ // release/re-acquire step and return immediately.
+ //
// Deadlines in the past are equivalent to an immediate deadline.
// Negative timeouts are equivalent to a zero timeout.
//
@@ -457,9 +457,9 @@ class ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
- static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
- synchronization_internal::KernelTimeout t);
+ static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
+ static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ synchronization_internal::KernelTimeout t);
// slow path acquire
void LockSlowLoop(SynchWaitParams *waitp, int flags);
@@ -505,36 +505,36 @@ class ABSL_LOCKABLE Mutex {
// Example:
//
// Class Foo {
-// public:
+// public:
// Foo::Bar* Baz() {
-// MutexLock lock(&mu_);
+// MutexLock lock(&mu_);
// ...
// return bar;
// }
//
// private:
-// Mutex mu_;
+// Mutex mu_;
// };
class ABSL_SCOPED_LOCKABLE MutexLock {
public:
- // Constructors
-
- // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
- // guaranteed to be locked when this object is constructed. Requires that
- // `mu` be dereferenceable.
+ // Constructors
+
+ // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
+ // guaranteed to be locked when this object is constructed. Requires that
+ // `mu` be dereferenceable.
explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
- // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
- // the above, the condition given by `cond` is also guaranteed to hold when
- // this object is constructed.
- explicit MutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- this->mu_->LockWhen(cond);
- }
-
+ // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
+ // the above, the condition given by `cond` is also guaranteed to hold when
+ // this object is constructed.
+ explicit MutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
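
The conditional constructor restored above combines `LockWhen` with a scoped unlock; a sketch of a caller, with the class and members invented:

    #include <vector>
    #include "y_absl/base/thread_annotations.h"
    #include "y_absl/synchronization/mutex.h"

    class JobQueue {
     public:
      int Pop() {
        // Blocks until jobs_ is non-empty, then holds mu_ for the scope.
        y_absl::MutexLock lock(&mu_, y_absl::Condition(
            +[](std::vector<int>* v) { return !v->empty(); }, &jobs_));
        int job = jobs_.back();
        jobs_.pop_back();
        return job;
      }

     private:
      y_absl::Mutex mu_;
      std::vector<int> jobs_ ABSL_GUARDED_BY(mu_);
    };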
@@ -556,12 +556,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
- ABSL_SHARED_LOCK_FUNCTION(mu)
- : mu_(mu) {
- mu->ReaderLockWhen(cond);
- }
-
+ explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->ReaderLockWhen(cond);
+ }
+
ReaderMutexLock(const ReaderMutexLock&) = delete;
ReaderMutexLock(ReaderMutexLock&&) = delete;
ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
@@ -584,12 +584,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- mu->WriterLockWhen(cond);
- }
-
+ explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLockWhen(cond);
+ }
+
WriterMutexLock(const WriterMutexLock&) = delete;
WriterMutexLock(WriterMutexLock&&) = delete;
WriterMutexLock& operator=(const WriterMutexLock&) = delete;
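
The reader-side constructor added in the previous hunk works the same way; a sketch using the `Condition(const bool*)` form, with names invented:

    // Take a shared lock once ready becomes true; read-only access follows.
    int ReadWhenReady(y_absl::Mutex& mu, const bool& ready, const int& value) {
      y_absl::ReaderMutexLock lock(&mu, y_absl::Condition(&ready));
      return value;
    }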
@@ -628,27 +628,27 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
// `noexcept`; until then this requirement cannot be enforced in the
// type system.)
//
-// Note: to use a `Condition`, you need only construct it and pass it to a
-// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
-// constructor of one of the scope guard classes.
+// Note: to use a `Condition`, you need only construct it and pass it to a
+// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
+// constructor of one of the scope guard classes.
//
-// Example using LockWhen/Unlock:
+// Example using LockWhen/Unlock:
//
// // assume count_ is not an internal reference count
// int count_ ABSL_GUARDED_BY(mu_);
-// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
-//
-// mu_.LockWhen(count_is_zero);
-// // ...
-// mu_.Unlock();
-//
-// Example using a scope guard:
-//
-// {
-// MutexLock lock(&mu_, count_is_zero);
-// // ...
-// }
-//
+// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
+//
+// mu_.LockWhen(count_is_zero);
+// // ...
+// mu_.Unlock();
+//
+// Example using a scope guard:
+//
+// {
+// MutexLock lock(&mu_, count_is_zero);
+// // ...
+// }
+//
// When multiple threads are waiting on exactly the same condition, make sure
// that they are constructed with the same parameters (same pointer to function
// + arg, or same pointer to object + method), so that the mutex implementation
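
The two fragments above, joined into one self-contained sketch; the `RefCounted` wrapper is invented for illustration:

    #include "y_absl/base/thread_annotations.h"
    #include "y_absl/synchronization/mutex.h"

    class RefCounted {
     public:
      void Release() {
        y_absl::MutexLock lock(&mu_);
        --count_;
      }
      void WaitUntilUnreferenced() {
        y_absl::Condition count_is_zero(
            +[](int* count) { return *count == 0; }, &count_);
        y_absl::MutexLock lock(&mu_, count_is_zero);  // scope-guard form
      }  // mu_ released here

     private:
      y_absl::Mutex mu_;
      int count_ ABSL_GUARDED_BY(mu_) = 1;
    };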
@@ -701,11 +701,11 @@ class Condition {
// return processed_ >= current;
// };
// mu_.Await(Condition(&reached));
- //
- // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
- // the lambda as it may be called when the mutex is being unlocked from a
-  // scope holding only a reader lock, which would cause the assertion to
-  // fail and crash the binary.
+ //
+ // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+ // the lambda as it may be called when the mutex is being unlocked from a
+  // scope holding only a reader lock, which would cause the assertion to
+  // fail and crash the binary.
// See class comment for performance advice. In particular, if there
// might be more than one waiter for the same condition, make sure
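
A sketch of the `AssertReaderHeld()` rule from the NOTE above, with the class and fields invented:

    class Pipeline {
     public:
      void WaitForProcessed(int current) {
        y_absl::ReaderMutexLock lock(&mu_);
        auto reached = [this, current]() {
          mu_.AssertReaderHeld();  // not AssertHeld(): the predicate may be
                                   // evaluated under only a shared lock
          return processed_ >= current;
        };
        mu_.Await(y_absl::Condition(&reached));
      }

     private:
      y_absl::Mutex mu_;
      int processed_ ABSL_GUARDED_BY(mu_) = 0;
    };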
@@ -790,8 +790,8 @@ class Condition {
//
class CondVar {
public:
-  // A `CondVar` allocated on the heap or on the stack can use this
-  // constructor.
+  // A `CondVar` allocated on the heap or on the stack can use this
+  // constructor.
CondVar();
~CondVar();
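
A sketch of the lower-level protocol `CondVar` implies, with the usual loop guarding against spurious wakeups; the `Flag` class is invented:

    class Flag {
     public:
      void Set() {
        y_absl::MutexLock lock(&mu_);
        set_ = true;
        cv_.Signal();  // or SignalAll() when several threads may be waiting
      }
      void Block() {
        y_absl::MutexLock lock(&mu_);
        while (!set_) cv_.Wait(&mu_);  // atomically unlocks, waits, relocks
      }

     private:
      y_absl::Mutex mu_;
      y_absl::CondVar cv_;
      bool set_ ABSL_GUARDED_BY(mu_) = false;
    };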
@@ -879,15 +879,15 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
this->mu_->Lock();
}
}
-
- explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- if (this->mu_ != nullptr) {
- this->mu_->LockWhen(cond);
- }
- }
-
+
+ explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->LockWhen(cond);
+ }
+ }
+
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
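
A sketch of the null-tolerant guard this hunk touches; the function and parameters are invented:

    // When mu is null the guard is a no-op, so one code path can serve both
    // synchronized and single-threaded callers.
    void Bump(y_absl::Mutex* mu /* may be nullptr */, int* counter) {
      y_absl::MutexLockMaybe lock(mu);
      ++*counter;
    }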
@@ -910,13 +910,13 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
: mu_(mu) {
this->mu_->Lock();
}
-
- explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
- ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
- this->mu_->LockWhen(cond);
- }
-
+
+ explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
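
A sketch of the early-release pattern `ReleasableMutexLock` exists for; names are invented:

    int TakeIfPositive(y_absl::Mutex& mu, int& value) {
      y_absl::ReleasableMutexLock lock(&mu);
      if (value <= 0) {
        lock.Release();  // unlock now; the destructor will not unlock twice
        return 0;        // slow work could follow here without holding mu
      }
      return value--;    // still held; released by the destructor
    }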
@@ -1005,7 +1005,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
//
// This has the same memory ordering concerns as RegisterMutexProfiler() above.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
- int64_t wait_cycles));
+ int64_t wait_cycles));
// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
// into a single interface, since they are only ever called in pairs.
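
A sketch of a hook matching the traced signature shown in this hunk; the body is a placeholder, and registration should happen once, early, for the memory-ordering reasons the comment cites:

    #include <cstdint>
    #include "y_absl/synchronization/mutex.h"

    void MyMutexTracer(const char* msg, const void* obj, int64_t wait_cycles) {
      // e.g. forward (msg, obj, wait_cycles) to a metrics backend
    }

    void InstallTracing() {
      y_absl::RegisterMutexTracer(&MyMutexTracer);
    }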
@@ -1076,7 +1076,7 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
} // extern "C"
#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
diff --git a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
index 860fd01b0f..727a63eb92 100644
--- a/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
+++ b/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/ya.make
@@ -2,15 +2,15 @@
LIBRARY()
-OWNER(
- somov
- g:cpp-contrib
-)
+OWNER(
+ somov
+ g:cpp-contrib
+)
LICENSE(Apache-2.0)
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
PEERDIR(
contrib/restricted/abseil-cpp-tstring/y_absl/base
contrib/restricted/abseil-cpp-tstring/y_absl/base/internal/low_level_alloc