author     hiddenpath <hiddenpath@yandex-team.com>   2024-01-31 11:50:49 +0300
committer  hiddenpath <hiddenpath@yandex-team.com>   2024-01-31 12:11:04 +0300
commit     0ff1123e2bd6d1424be6f5d21ad6f390b4fd5ac8 (patch)
tree       495513bd2f895ddcd79f7ab280a3541672fdfab6
parent     d7b42f12f66a12f323fd46e48246e1693bc4c6e9 (diff)
download   ydb-0ff1123e2bd6d1424be6f5d21ad6f390b4fd5ac8.tar.gz
Update libcxxrt to 2023-10-11 03c83f5a57be8c5b1a29a68de5638744f17d28ba
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/atomic.h     | 110
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/cxxabi.h     |  15
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/exception.cc | 117
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/guard.cc     | 373
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/memory.cc    |   7
-rw-r--r--  contrib/libs/cxxsupp/libcxxrt/ya.make      |  15
6 files changed, 474 insertions(+), 163 deletions(-)
diff --git a/contrib/libs/cxxsupp/libcxxrt/atomic.h b/contrib/libs/cxxsupp/libcxxrt/atomic.h
index afdbdd04d5..701d05337c 100644
--- a/contrib/libs/cxxsupp/libcxxrt/atomic.h
+++ b/contrib/libs/cxxsupp/libcxxrt/atomic.h
@@ -1,28 +1,102 @@
+
#ifndef __has_builtin
-#define __has_builtin(x) 0
+# define __has_builtin(x) 0
#endif
#ifndef __has_feature
-#define __has_feature(x) 0
+# define __has_feature(x) 0
+#endif
+#ifndef __has_extension
+# define __has_extension(x) 0
+#endif
+
+#if !__has_extension(c_atomic)
+# define _Atomic(T) T
#endif
-/**
- * Swap macro that enforces a happens-before relationship with a corresponding
- * ATOMIC_LOAD.
- */
#if __has_builtin(__c11_atomic_exchange)
-#define ATOMIC_SWAP(addr, val)\
- __c11_atomic_exchange(reinterpret_cast<_Atomic(__typeof__(val))*>(addr), val, __ATOMIC_ACQ_REL)
-#elif __has_builtin(__sync_swap)
-#define ATOMIC_SWAP(addr, val)\
- __sync_swap(addr, val)
+# define ATOMIC_BUILTIN(name) __c11_atomic_##name
#else
-#define ATOMIC_SWAP(addr, val)\
- __sync_lock_test_and_set(addr, val)
+# define ATOMIC_BUILTIN(name) __atomic_##name##_n
#endif
-#if __has_builtin(__c11_atomic_load)
-#define ATOMIC_LOAD(addr)\
- __c11_atomic_load(reinterpret_cast<_Atomic(__typeof__(*addr))*>(addr), __ATOMIC_ACQUIRE)
+namespace
+{
+ /**
+ * C++11 memory orders. We only need a subset of them.
+ */
+ enum memory_order
+ {
+ /**
+ * Acquire order.
+ */
+ acquire = __ATOMIC_ACQUIRE,
+
+ /**
+ * Release order.
+ */
+ release = __ATOMIC_RELEASE,
+
+ /**
+ * Sequentially consistent memory ordering.
+ */
+ seqcst = __ATOMIC_SEQ_CST
+ };
+
+ /**
+ * Atomic, implements a subset of `std::atomic`.
+ */
+ template<typename T>
+ class atomic
+ {
+ /**
+ * The underlying value. Use C11 atomic qualification if available.
+ */
+ _Atomic(T) val;
+
+ public:
+ /**
+ * Constructor, takes a value.
+ */
+ atomic(T init) : val(init) {}
+
+ /**
+ * Atomically load with the specified memory order.
+ */
+ T load(memory_order order = memory_order::seqcst)
+ {
+ return ATOMIC_BUILTIN(load)(&val, order);
+ }
+
+ /**
+ * Atomically store with the specified memory order.
+ */
+ void store(T v, memory_order order = memory_order::seqcst)
+ {
+ return ATOMIC_BUILTIN(store)(&val, v, order);
+ }
+
+ /**
+ * Atomically exchange with the specified memory order.
+ */
+ T exchange(T v, memory_order order = memory_order::seqcst)
+ {
+ return ATOMIC_BUILTIN(exchange)(&val, v, order);
+ }
+
+ /**
+	 * Atomically compare and exchange with the specified memory order.
+ */
+ bool compare_exchange(T & expected,
+ T desired,
+ memory_order order = memory_order::seqcst)
+ {
+#if __has_builtin(__c11_atomic_compare_exchange_strong)
+ return __c11_atomic_compare_exchange_strong(
+ &val, &expected, desired, order, order);
#else
-#define ATOMIC_LOAD(addr)\
- (__sync_synchronize(), *addr)
+ return __atomic_compare_exchange_n(
+ &val, &expected, desired, true, order, order);
#endif
+ }
+ };
+} // namespace
+#undef ATOMIC_BUILTIN
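The wrapper above is a pared-down std::atomic. As a rough standalone sketch of how it is meant to be used, assuming a GCC/Clang compiler and going straight to the __atomic builtins instead of the header's ATOMIC_BUILTIN dispatch (the demo namespace and main are illustrative, not part of the library):

// Standalone sketch of the minimal atomic wrapper introduced above.
// Compiles with g++/clang++; uses the __atomic builtins directly.
#include <cstdio>

namespace demo
{
	enum memory_order
	{
		acquire = __ATOMIC_ACQUIRE,
		release = __ATOMIC_RELEASE,
		seqcst  = __ATOMIC_SEQ_CST
	};

	template<typename T>
	class atomic
	{
		T val;

		public:
		atomic(T init) : val(init) {}

		T load(memory_order order = seqcst)
		{
			return __atomic_load_n(&val, order);
		}

		void store(T v, memory_order order = seqcst)
		{
			__atomic_store_n(&val, v, order);
		}

		T exchange(T v, memory_order order = seqcst)
		{
			return __atomic_exchange_n(&val, v, order);
		}

		bool compare_exchange(T &expected, T desired,
		                      memory_order order = seqcst)
		{
			// Strong CAS here; the patched header uses a weak CAS on
			// the non-C11 builtin path.
			return __atomic_compare_exchange_n(&val, &expected, desired,
			                                   false, order, order);
		}
	};
} // namespace demo

int main()
{
	demo::atomic<int> flag(0);
	int expected = 0;
	if (flag.compare_exchange(expected, 1))
		std::puts("first caller acquired the flag");
	flag.store(0, demo::release);
	return flag.load(demo::acquire); // 0
}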
diff --git a/contrib/libs/cxxsupp/libcxxrt/cxxabi.h b/contrib/libs/cxxsupp/libcxxrt/cxxabi.h
index 7a8cb6745f..e96d4c2592 100644
--- a/contrib/libs/cxxsupp/libcxxrt/cxxabi.h
+++ b/contrib/libs/cxxsupp/libcxxrt/cxxabi.h
@@ -77,6 +77,13 @@ struct __cxa_exception
{
#if __LP64__
/**
+ * Now _Unwind_Exception is marked with __attribute__((aligned)), which
+ * implies __cxa_exception is also aligned. Insert padding in the
+ * beginning of the struct, rather than before unwindHeader.
+ */
+ void *reserve;
+
+ /**
* Reference count. Used to support the C++11 exception_ptr class. This
* is prepended to the structure in 64-bit mode and squeezed in to the
* padding left before the 64-bit aligned _Unwind_Exception at the end in
@@ -198,6 +205,14 @@ __cxa_eh_globals *__cxa_get_globals_fast(void);
std::type_info * __cxa_current_exception_type();
+
+void *__cxa_allocate_exception(size_t thrown_size);
+
+void __cxa_free_exception(void* thrown_exception);
+
+__cxa_exception *__cxa_init_primary_exception(
+ void *object, std::type_info* tinfo, void (*dest)(void *));
+
/**
* Throws an exception returned by __cxa_current_primary_exception(). This
* exception may have been caught in another thread.
diff --git a/contrib/libs/cxxsupp/libcxxrt/exception.cc b/contrib/libs/cxxsupp/libcxxrt/exception.cc
index 9bb47c8e9e..0a26befee6 100644
--- a/contrib/libs/cxxsupp/libcxxrt/exception.cc
+++ b/contrib/libs/cxxsupp/libcxxrt/exception.cc
@@ -1,5 +1,6 @@
/*
* Copyright 2010-2011 PathScale, Inc. All rights reserved.
+ * Copyright 2021 David Chisnall. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -139,6 +140,7 @@ struct __cxa_thread_info
terminate_handler terminateHandler;
/** The unexpected exception handler for this thread. */
unexpected_handler unexpectedHandler;
+#ifndef LIBCXXRT_NO_EMERGENCY_MALLOC
/**
* The number of emergency buffers held by this thread. This is 0 in
* normal operation - the emergency buffers are only used when malloc()
@@ -147,6 +149,7 @@ struct __cxa_thread_info
* in ABI spec [3.3.1]).
*/
int emergencyBuffersHeld;
+#endif
/**
* The exception currently running in a cleanup.
*/
@@ -174,6 +177,7 @@ struct __cxa_thread_info
struct __cxa_dependent_exception
{
#if __LP64__
+ void *reserve;
void *primaryException;
#endif
std::type_info *exceptionType;
@@ -196,8 +200,18 @@ struct __cxa_dependent_exception
#endif
_Unwind_Exception unwindHeader;
};
+static_assert(sizeof(__cxa_exception) == sizeof(__cxa_dependent_exception),
+ "__cxa_exception and __cxa_dependent_exception should have the same size");
+static_assert(offsetof(__cxa_exception, referenceCount) ==
+ offsetof(__cxa_dependent_exception, primaryException),
+ "referenceCount and primaryException should have the same offset");
+static_assert(offsetof(__cxa_exception, unwindHeader) ==
+ offsetof(__cxa_dependent_exception, unwindHeader),
+ "unwindHeader fields should have the same offset");
+static_assert(offsetof(__cxa_dependent_exception, unwindHeader) ==
+ offsetof(__cxa_dependent_exception, adjustedPtr) + 8,
+ "there should be no padding before unwindHeader");
-static_assert(sizeof(__cxa_dependent_exception) == sizeof(__cxa_exception));
namespace std
{
@@ -283,19 +297,19 @@ using namespace ABI_NAMESPACE;
*/
static _Unwind_Reason_Code trace(struct _Unwind_Context *context, void *c)
{
- Dl_info myinfo;
- int mylookup =
- dladdr(reinterpret_cast<void *>(__cxa_current_exception_type), &myinfo);
- void *ip = reinterpret_cast<void*>(_Unwind_GetIP(context));
- Dl_info info;
- if (dladdr(ip, &info) != 0)
- {
- if (mylookup == 0 || strcmp(info.dli_fname, myinfo.dli_fname) != 0)
- {
- printf("%p:%s() in %s\n", ip, info.dli_sname, info.dli_fname);
- }
- }
- return _URC_CONTINUE_UNWIND;
+ Dl_info myinfo;
+ int mylookup =
+ dladdr(reinterpret_cast<void *>(__cxa_current_exception_type), &myinfo);
+ void *ip = reinterpret_cast<void*>(_Unwind_GetIP(context));
+ Dl_info info;
+ if (dladdr(ip, &info) != 0)
+ {
+ if (mylookup == 0 || strcmp(info.dli_fname, myinfo.dli_fname) != 0)
+ {
+ printf("%p:%s() in %s\n", ip, info.dli_sname, info.dli_fname);
+ }
+ }
+ return _URC_CONTINUE_UNWIND;
}
static void bt_terminate_handler() {
@@ -335,9 +349,9 @@ static void bt_terminate_handler() {
}
/** The global termination handler. */
-static terminate_handler terminateHandler = bt_terminate_handler;
+static atomic<terminate_handler> terminateHandler = bt_terminate_handler;
/** The global unexpected exception handler. */
-static unexpected_handler unexpectedHandler = std::terminate;
+static atomic<unexpected_handler> unexpectedHandler = std::terminate;
/** Key used for thread-local data. */
static pthread_key_t eh_key;
@@ -512,6 +526,23 @@ extern "C" __cxa_eh_globals *ABI_NAMESPACE::__cxa_get_globals_fast(void)
return &(thread_info_fast()->globals);
}
+#ifdef LIBCXXRT_NO_EMERGENCY_MALLOC
+static char *alloc_or_die(size_t size)
+{
+ char *buffer = static_cast<char*>(calloc(1, size));
+
+ if (buffer == nullptr)
+ {
+ fputs("Out of memory attempting to allocate exception\n", stderr);
+ std::terminate();
+ }
+ return buffer;
+}
+static void free_exception(char *e)
+{
+ free(e);
+}
+#else
/**
* An emergency allocation reserved for when malloc fails. This is treated as
* 16 buffers of 1KB each.
@@ -651,6 +682,7 @@ static void free_exception(char *e)
free(e);
}
}
+#endif
static constexpr size_t align_to(size_t size, size_t alignment) noexcept {
return (size + alignment - 1) / alignment * alignment;
@@ -672,6 +704,7 @@ static_assert(
static constexpr size_t backtrace_buffer_size = 0;
#endif
+
/**
* Allocates an exception structure. Returns a pointer to the space that can
* be used to store an object of thrown_size bytes. This function will use an
@@ -821,12 +854,12 @@ static void throw_exception(__cxa_exception *ex)
ex->unexpectedHandler = info->unexpectedHandler;
if (0 == ex->unexpectedHandler)
{
- ex->unexpectedHandler = unexpectedHandler;
+ ex->unexpectedHandler = unexpectedHandler.load();
}
ex->terminateHandler = info->terminateHandler;
if (0 == ex->terminateHandler)
{
- ex->terminateHandler = terminateHandler;
+ ex->terminateHandler = terminateHandler.load();
}
info->globals.uncaughtExceptions++;
@@ -837,6 +870,21 @@ static void throw_exception(__cxa_exception *ex)
report_failure(err, ex);
}
+extern "C" __cxa_exception *__cxa_init_primary_exception(
+ void *object, std::type_info* tinfo, void (*dest)(void *)) {
+ __cxa_exception *ex = reinterpret_cast<__cxa_exception*>(object) - 1;
+
+ ex->referenceCount = 0;
+ ex->exceptionType = tinfo;
+
+ ex->exceptionDestructor = dest;
+
+ ex->unwindHeader.exception_class = exception_class;
+ ex->unwindHeader.exception_cleanup = exception_cleanup;
+
+ return ex;
+}
+
typedef void (*cxa_throw_hook_t)(void*, std::type_info*, void(*)(void*)) noexcept;
__attribute__((weak)) cxa_throw_hook_t cxa_throw_hook = nullptr;
@@ -850,20 +898,13 @@ extern "C" void __cxa_throw(void *thrown_exception,
std::type_info *tinfo,
void(*dest)(void*))
{
- if (cxa_throw_hook)
- {
- cxa_throw_hook(thrown_exception, tinfo, dest);
- }
-
- __cxa_exception *ex = reinterpret_cast<__cxa_exception*>(thrown_exception) - 1;
+ if (cxa_throw_hook)
+ {
+ cxa_throw_hook(thrown_exception, tinfo, dest);
+ }
+ __cxa_exception *ex = __cxa_init_primary_exception(thrown_exception, tinfo, dest);
ex->referenceCount = 1;
- ex->exceptionType = tinfo;
-
- ex->exceptionDestructor = dest;
-
- ex->unwindHeader.exception_class = exception_class;
- ex->unwindHeader.exception_cleanup = exception_cleanup;
throw_exception(ex);
}
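Splitting the field setup out of __cxa_throw means an exception header can now be populated without starting unwinding, which is what reference-counted primary exceptions (std::exception_ptr support) need. A hedged sketch of the resulting allocate/initialise sequence, assuming the declarations added to cxxabi.h above are reachable via the __cxxabiv1 namespace; the Payload type and helper names are hypothetical:

// Hypothetical caller of the new entry points. Real users (for example a
// make_exception_ptr fast path) bump referenceCount and eventually either
// throw the object or release it.
#include <cxxabi.h>
#include <new>
#include <typeinfo>

struct Payload { int code; };

static void destroy_payload(void *p)
{
	static_cast<Payload *>(p)->~Payload();
}

void *make_primary_exception(int code)
{
	// Returns a pointer to the object region; the __cxa_exception header
	// sits immediately before it.
	void *obj = __cxxabiv1::__cxa_allocate_exception(sizeof(Payload));
	new (obj) Payload{code};
	// Fills in type, destructor and unwind header but does not throw;
	// referenceCount is left at 0 for the caller to manage.
	__cxxabiv1::__cxa_init_primary_exception(
	    obj, const_cast<std::type_info *>(&typeid(Payload)),
	    destroy_payload);
	return obj;
}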
@@ -1567,7 +1608,7 @@ namespace std
{
if (thread_local_handlers) { return pathscale::set_unexpected(f); }
- return ATOMIC_SWAP(&unexpectedHandler, f);
+ return unexpectedHandler.exchange(f);
}
/**
* Sets the function that is called to terminate the program.
@@ -1576,7 +1617,7 @@ namespace std
{
if (thread_local_handlers) { return pathscale::set_terminate(f); }
- return ATOMIC_SWAP(&terminateHandler, f);
+ return terminateHandler.exchange(f);
}
/**
* Terminates the program, calling a custom terminate implementation if
@@ -1592,7 +1633,7 @@ namespace std
// return.
abort();
}
- terminateHandler();
+ terminateHandler.load()();
}
/**
* Called when an unexpected exception is encountered (i.e. an exception
@@ -1609,7 +1650,7 @@ namespace std
// return.
abort();
}
- unexpectedHandler();
+ unexpectedHandler.load()();
}
/**
* Returns whether there are any exceptions currently being thrown that
@@ -1639,7 +1680,7 @@ namespace std
{
return info->unexpectedHandler;
}
- return ATOMIC_LOAD(&unexpectedHandler);
+ return unexpectedHandler.load();
}
/**
* Returns the current terminate handler.
@@ -1651,7 +1692,7 @@ namespace std
{
return info->terminateHandler;
}
- return ATOMIC_LOAD(&terminateHandler);
+ return terminateHandler.load();
}
}
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
@@ -1682,8 +1723,10 @@ asm (
".type __cxa_end_cleanup, \"function\" \n"
"__cxa_end_cleanup: \n"
" push {r1, r2, r3, r4} \n"
+" mov r4, lr \n"
" bl __cxa_get_cleanup \n"
-" push {r1, r2, r3, r4} \n"
+" mov lr, r4 \n"
+" pop {r1, r2, r3, r4} \n"
" b _Unwind_Resume \n"
" bl abort \n"
".popsection \n"
diff --git a/contrib/libs/cxxsupp/libcxxrt/guard.cc b/contrib/libs/cxxsupp/libcxxrt/guard.cc
index 10978716e8..cb58aa7da2 100644
--- a/contrib/libs/cxxsupp/libcxxrt/guard.cc
+++ b/contrib/libs/cxxsupp/libcxxrt/guard.cc
@@ -1,5 +1,6 @@
-/*
+/*
* Copyright 2010-2012 PathScale, Inc. All rights reserved.
+ * Copyright 2021 David Chisnall. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -10,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
* IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
@@ -38,126 +39,310 @@
* value as a low-overhead lock. Because statics (in most sane code) are
* accessed far more times than they are initialised, this lock implementation
* is heavily optimised towards the case where the static has already been
- * initialised.
+ * initialised.
*/
+#include "atomic.h"
+#include <assert.h>
+#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
-#include <stdio.h>
-#include <pthread.h>
-#include <assert.h>
-#include "atomic.h"
// Older GCC doesn't define __LITTLE_ENDIAN__
#ifndef __LITTLE_ENDIAN__
- // If __BYTE_ORDER__ is defined, use that instead
+// If __BYTE_ORDER__ is defined, use that instead
# ifdef __BYTE_ORDER__
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __LITTLE_ENDIAN__
# endif
- // x86 and ARM are the most common little-endian CPUs, so let's have a
- // special case for them (ARM is already special cased). Assume everything
- // else is big endian.
+// x86 and ARM are the most common little-endian CPUs, so let's have a
+// special case for them (ARM is already special cased). Assume everything
+// else is big endian.
# elif defined(__x86_64) || defined(__i386)
# define __LITTLE_ENDIAN__
# endif
#endif
-
/*
- * The least significant bit of the guard variable indicates that the object
- * has been initialised, the most significant bit is used for a spinlock.
+ * The Itanium C++ ABI defines guard words that are 64-bit (32-bit on AArch32)
+ * values with one bit defined to indicate that the guarded variable is initialised and
+ * another bit to indicate that it's currently locked (initialisation in
+ * progress). The bit to use depends on the byte order of the target.
+ *
+ * On many 32-bit platforms, 64-bit atomics are unavailable (or slow) and so we
+ * treat the two halves of the 64-bit word as independent values and establish
+ * an ordering on them such that the guard word is never modified unless the
+ * lock word is in the locked state. This means that we can do double-checked
+ * locking by loading the guard word and, if it is not initialised, trying to
+ * transition the lock word from the unlocked to locked state, and then
+ * manipulate the guard word.
*/
+namespace
+{
+ /**
+ * The state of the guard variable when an attempt is made to lock it.
+ */
+ enum class GuardState
+ {
+ /**
+ * The lock is not held but is not needed because initialisation is
+	 * done.
+ */
+ InitDone,
+
+ /**
+ * Initialisation is not done but the lock is held by the caller.
+ */
+ InitLockSucceeded,
+
+ /**
+ * Attempting to acquire the lock failed.
+ */
+ InitLockFailed
+ };
+
+ /**
+ * Class encapsulating a single atomic word being used to represent the
+ * guard. The word size is defined by the type of `GuardWord`. The bit
+ * used to indicate the locked state is `1<<LockedBit`, the bit used to
+ * indicate the initialised state is `1<<InitBit`.
+ */
+ template<typename GuardWord, int LockedBit, int InitBit>
+ struct SingleWordGuard
+ {
+ /**
+ * The value indicating that the lock bit is set (and no other bits).
+ */
+ static constexpr GuardWord locked = static_cast<GuardWord>(1)
+ << LockedBit;
+
+ /**
+ * The value indicating that the initialised bit is set (and all other
+ * bits are zero).
+ */
+ static constexpr GuardWord initialised = static_cast<GuardWord>(1)
+ << InitBit;
+
+ /**
+ * The guard variable.
+ */
+ atomic<GuardWord> val;
+
+ public:
+ /**
+ * Release the lock and set the initialised state. In the single-word
+ * implementation here, these are both done by a single store.
+ */
+ void unlock(bool isInitialised)
+ {
+ val.store(isInitialised ? initialised : 0, memory_order::release);
+#ifndef NDEBUG
+ GuardWord init_state = initialised;
+ assert(*reinterpret_cast<uint8_t*>(&init_state) != 0);
+#endif
+ }
+
+ /**
+ * Try to acquire the lock. This has a tri-state return, indicating
+ * either that the lock was acquired, it wasn't acquired because it was
+ * contended, or it wasn't acquired because the guarded variable is
+ * already initialised.
+ */
+ GuardState try_lock()
+ {
+ GuardWord old = 0;
+ // Try to acquire the lock, assuming that we are in the state where
+ // the lock is not held and the variable is not initialised (so the
+ // expected value is 0).
+ if (val.compare_exchange(old, locked))
+ {
+ return GuardState::InitLockSucceeded;
+ }
+ // If the CAS failed and the old value indicates that this is
+ // initialised, return that initialisation is done and skip further
+ // retries.
+ if (old == initialised)
+ {
+ return GuardState::InitDone;
+ }
+ // Otherwise, report failure.
+ return GuardState::InitLockFailed;
+ }
+
+ /**
+ * Check whether the guard indicates that the variable is initialised.
+ */
+ bool is_initialised()
+ {
+ return (val.load(memory_order::acquire) & initialised) ==
+ initialised;
+ }
+ };
+
+ /**
+ * Class encapsulating using two 32-bit atomic values to represent a 64-bit
+ * guard variable.
+ */
+ template<int LockedBit, int InitBit>
+ class DoubleWordGuard
+ {
+ /**
+ * The value of `lock_word` when the lock is held.
+ */
+ static constexpr uint32_t locked = static_cast<uint32_t>(1)
+ << LockedBit;
+
+ /**
+ * The value of `init_word` when the guarded variable is initialised.
+ */
+ static constexpr uint32_t initialised = static_cast<uint32_t>(1)
+ << InitBit;
+
+ /**
+ * The word used for the initialised flag. This is always the first
+ * word irrespective of endian because the generated code compares the
+ * first byte in memory against 0.
+ */
+ atomic<uint32_t> init_word;
+
+ /**
+ * The word used for the lock.
+ */
+ atomic<uint32_t> lock_word;
+
+ public:
+ /**
+ * Try to acquire the lock. This has a tri-state return, indicating
+ * either that the lock was acquired, it wasn't acquired because it was
+ * contended, or it wasn't acquired because the guarded variable is
+ * already initialised.
+ */
+ GuardState try_lock()
+ {
+ uint32_t old = 0;
+ // Try to acquire the lock
+ if (lock_word.compare_exchange(old, locked))
+ {
+ // If we succeeded, check if initialisation has happened. In
+ // this version, we don't have atomic manipulation of both the
+ // lock and initialised bits together. Instead, we have an
+ // ordering rule that the initialised bit is only ever updated
+ // with the lock held.
+ if (is_initialised())
+ {
+ // If another thread did manage to initialise this, release
+ // the lock and notify the caller that initialisation is
+ // done.
+ lock_word.store(0, memory_order::release);
+ return GuardState::InitDone;
+ }
+ return GuardState::InitLockSucceeded;
+ }
+ return GuardState::InitLockFailed;
+ }
+
+ /**
+ * Set the initialised state and release the lock. In this
+		 * implementation, this is ordered, not atomic: the initialised bit is
+ * set while the lock is held.
+ */
+ void unlock(bool isInitialised)
+ {
+ init_word.store(isInitialised ? initialised : 0,
+ memory_order::release);
+ lock_word.store(0, memory_order::release);
+ assert((*reinterpret_cast<uint8_t*>(this) != 0) == isInitialised);
+ }
+
+ /**
+ * Return whether the guarded variable is initialised.
+ */
+ bool is_initialised()
+ {
+ return (init_word.load(memory_order::acquire) & initialised) ==
+ initialised;
+ }
+ };
+
+ // Check that the two implementations are the correct size.
+ static_assert(sizeof(SingleWordGuard<uint32_t, 31, 0>) == sizeof(uint32_t),
+ "Single-word 32-bit guard must be 32 bits");
+ static_assert(sizeof(SingleWordGuard<uint64_t, 63, 0>) == sizeof(uint64_t),
+ "Single-word 64-bit guard must be 64 bits");
+ static_assert(sizeof(DoubleWordGuard<31, 0>) == sizeof(uint64_t),
+ "Double-word guard must be 64 bits");
+
#ifdef __arm__
-// ARM ABI - 32-bit guards.
-typedef uint32_t guard_t;
-typedef uint32_t guard_lock_t;
-static const uint32_t LOCKED = static_cast<guard_t>(1) << 31;
-static const uint32_t INITIALISED = 1;
-#define LOCK_PART(guard) (guard)
-#define INIT_PART(guard) (guard)
+ /**
+ * The Arm PCS defines a variant of the Itanium ABI with 32-bit lock words.
+ */
+ using Guard = SingleWordGuard<uint32_t, 31, 0>;
#elif defined(_LP64)
-typedef uint64_t guard_t;
-typedef uint64_t guard_lock_t;
# if defined(__LITTLE_ENDIAN__)
-static const guard_t LOCKED = static_cast<guard_t>(1) << 63;
-static const guard_t INITIALISED = 1;
+ /**
+ * On little-endian 64-bit platforms the guard word is a single 64-bit
+ * atomic with the lock in the high bit and the initialised flag in the low
+ * bit.
+ */
+ using Guard = SingleWordGuard<uint64_t, 63, 0>;
# else
-static const guard_t LOCKED = 1;
-static const guard_t INITIALISED = static_cast<guard_t>(1) << 56;
+ /**
+	 * On big-endian 64-bit platforms, the guard word is a single 64-bit atomic
+ * with the lock in the low bit and the initialised bit in the highest
+ * byte.
+ */
+ using Guard = SingleWordGuard<uint64_t, 0, 56>;
# endif
-#define LOCK_PART(guard) (guard)
-#define INIT_PART(guard) (guard)
#else
-typedef uint32_t guard_lock_t;
# if defined(__LITTLE_ENDIAN__)
-typedef struct {
- uint32_t init_half;
- uint32_t lock_half;
-} guard_t;
-static const uint32_t LOCKED = static_cast<guard_lock_t>(1) << 31;
-static const uint32_t INITIALISED = 1;
+ /**
+ * 32-bit platforms use the same layout as 64-bit.
+ */
+ using Guard = DoubleWordGuard<31, 0>;
# else
-typedef struct {
- uint32_t init_half;
- uint32_t lock_half;
-} guard_t;
-static_assert(sizeof(guard_t) == sizeof(uint64_t), "");
-static const uint32_t LOCKED = 1;
-static const uint32_t INITIALISED = static_cast<guard_lock_t>(1) << 24;
+ /**
+ * 32-bit platforms use the same layout as 64-bit.
+ */
+ using Guard = DoubleWordGuard<0, 24>;
# endif
-#define LOCK_PART(guard) (&(guard)->lock_half)
-#define INIT_PART(guard) (&(guard)->init_half)
#endif
-static const guard_lock_t INITIAL = 0;
+
+} // namespace
/**
* Acquires a lock on a guard, returning 0 if the object has already been
* initialised, and 1 if it has not. If the object is already constructed then
* this function just needs to read a byte from memory and return.
*/
-extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
+extern "C" int __cxa_guard_acquire(Guard *guard_object)
{
- guard_lock_t old;
- // Not an atomic read, doesn't establish a happens-before relationship, but
- // if one is already established and we end up seeing an initialised state
- // then it's a fast path, otherwise we'll do something more expensive than
- // this test anyway...
- if (INITIALISED == __atomic_load_n(INIT_PART(guard_object), __ATOMIC_RELAXED))
+ // Check if this is already initialised. If so, we don't have to do
+ // anything.
+ if (guard_object->is_initialised())
+ {
return 0;
- // Spin trying to do the initialisation
+ }
+ // Spin trying to acquire the lock. If we fail to acquire the lock the
+ // first time then another thread will *probably* initialise it, but if the
+ // constructor throws an exception then we may have to try again in this
+ // thread.
for (;;)
{
- // Loop trying to move the value of the guard from 0 (not
- // locked, not initialised) to the locked-uninitialised
- // position.
- old = __sync_val_compare_and_swap(LOCK_PART(guard_object),
- INITIAL, LOCKED);
- if (old == INITIAL) {
- // Lock obtained. If lock and init bit are
- // in separate words, check for init race.
- if (INIT_PART(guard_object) == LOCK_PART(guard_object))
- return 1;
- if (INITIALISED != *INIT_PART(guard_object))
+ // Try to acquire the lock.
+ switch (guard_object->try_lock())
+ {
+ // If we failed to acquire the lock but another thread has
+ // initialised the lock while we were waiting, return immediately
+ // indicating that initialisation is not required.
+ case GuardState::InitDone:
+ return 0;
+ // If we acquired the lock, return immediately to start
+ // initialisation.
+ case GuardState::InitLockSucceeded:
return 1;
-
- // No need for a memory barrier here,
- // see first comment.
- __atomic_store_n(LOCK_PART(guard_object), INITIAL, __ATOMIC_RELAXED);
- return 0;
+ // If we didn't acquire the lock, pause and retry.
+ case GuardState::InitLockFailed:
+ break;
}
- // If lock and init bit are in the same word, check again
- // if we are done.
- if (INIT_PART(guard_object) == LOCK_PART(guard_object) &&
- old == INITIALISED)
- return 0;
-
- assert(old == LOCKED);
- // Another thread holds the lock.
- // If lock and init bit are in different words, check
- // if we are done before yielding and looping.
- if (INIT_PART(guard_object) != LOCK_PART(guard_object) &&
- INITIALISED == *INIT_PART(guard_object))
- return 0;
sched_yield();
}
}
@@ -166,28 +351,16 @@ extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
* Releases the lock without marking the object as initialised. This function
* is called if initialising a static causes an exception to be thrown.
*/
-extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
+extern "C" void __cxa_guard_abort(Guard *guard_object)
{
- __attribute__((unused))
- bool reset = __sync_bool_compare_and_swap(LOCK_PART(guard_object),
- LOCKED, INITIAL);
- assert(reset);
+ guard_object->unlock(false);
}
+
/**
* Releases the guard and marks the object as initialised. This function is
* called after successful initialisation of a static.
*/
-extern "C" void __cxa_guard_release(volatile guard_t *guard_object)
+extern "C" void __cxa_guard_release(Guard *guard_object)
{
- guard_lock_t old;
- if (INIT_PART(guard_object) == LOCK_PART(guard_object))
- old = LOCKED;
- else
- old = INITIAL;
- __attribute__((unused))
- bool reset = __sync_bool_compare_and_swap(INIT_PART(guard_object),
- old, INITIALISED);
- assert(reset);
- if (INIT_PART(guard_object) != LOCK_PART(guard_object))
- *LOCK_PART(guard_object) = INITIAL;
+ guard_object->unlock(true);
}
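For reference, the contract these three functions implement: for a function-local static the compiler emits code roughly equivalent to the hand-written sketch below. Widget, make_widget and the explicit guard object are illustrative; real codegen is target-specific (the guard is 32-bit on Arm) and uses the word layout described above.

// Hand-written equivalent of the code emitted for:
//     Widget &get() { static Widget w = make_widget(); return w; }
#include <cstdint>
#include <new>

struct Widget { int value; };
Widget make_widget() { return Widget{42}; }

extern "C" int  __cxa_guard_acquire(uint64_t *guard);
extern "C" void __cxa_guard_release(uint64_t *guard);
extern "C" void __cxa_guard_abort(uint64_t *guard);

static uint64_t guard_w;
alignas(Widget) static unsigned char storage[sizeof(Widget)];

Widget &get()
{
	// Fast path: the first byte of the guard is non-zero once the variable
	// is initialised (real codegen reads it with acquire semantics), so
	// most calls never enter the runtime at all.
	if (*reinterpret_cast<unsigned char *>(&guard_w) == 0 &&
	    __cxa_guard_acquire(&guard_w) == 1)
	{
		try
		{
			new (storage) Widget(make_widget());
			__cxa_guard_release(&guard_w); // mark initialised and unlock
		}
		catch (...)
		{
			__cxa_guard_abort(&guard_w);   // unlock, stay uninitialised
			throw;
		}
	}
	return *reinterpret_cast<Widget *>(storage);
}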
diff --git a/contrib/libs/cxxsupp/libcxxrt/memory.cc b/contrib/libs/cxxsupp/libcxxrt/memory.cc
index 694c5d5037..caa0aba79a 100644
--- a/contrib/libs/cxxsupp/libcxxrt/memory.cc
+++ b/contrib/libs/cxxsupp/libcxxrt/memory.cc
@@ -51,7 +51,7 @@ typedef void (*new_handler)();
* The function to call when allocation fails. By default, there is no
* handler and a bad allocation exception is thrown if an allocation fails.
*/
-static new_handler new_handl;
+static atomic<new_handler> new_handl{nullptr};
namespace std
{
@@ -61,12 +61,13 @@ namespace std
__attribute__((weak))
new_handler set_new_handler(new_handler handler) noexcept
{
- return ATOMIC_SWAP(&new_handl, handler);
+ return new_handl.exchange(handler);
}
+
__attribute__((weak))
new_handler get_new_handler(void) noexcept
{
- return ATOMIC_LOAD(&new_handl);
+ return new_handl.load();
}
}
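The handler being wrapped is the standard new-handler, so the observable behaviour the atomic swap must preserve is just the set/get round-trip; a minimal standalone check, assuming no handler was installed beforehand:

// Round-trip through set_new_handler()/get_new_handler(), which now use
// atomic<new_handler>::exchange() and ::load() internally.
#include <cassert>
#include <cstdlib>
#include <new>

static void on_exhausted()
{
	// A new-handler must free memory, throw std::bad_alloc, or not return.
	std::abort();
}

int main()
{
	std::new_handler prev = std::set_new_handler(on_exhausted);
	assert(prev == nullptr);                      // default is no handler
	assert(std::get_new_handler() == &on_exhausted);
	std::set_new_handler(prev);                   // restore the default
	return 0;
}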
diff --git a/contrib/libs/cxxsupp/libcxxrt/ya.make b/contrib/libs/cxxsupp/libcxxrt/ya.make
index 66955cff48..93c9d74bb0 100644
--- a/contrib/libs/cxxsupp/libcxxrt/ya.make
+++ b/contrib/libs/cxxsupp/libcxxrt/ya.make
@@ -1,4 +1,4 @@
-# Generated by devtools/yamaker from nixpkgs 22.05.
+# Generated by devtools/yamaker from nixpkgs 22.11.
LIBRARY()
@@ -11,9 +11,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(2021-09-08)
+VERSION(2023-10-11)
-ORIGINAL_SOURCE(https://github.com/libcxxrt/libcxxrt/archive/14bf5d5526056ae1cc16f03b7b8e96108a1e38d0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/libcxxrt/libcxxrt/archive/03c83f5a57be8c5b1a29a68de5638744f17d28ba.tar.gz)
ADDINCL(
contrib/libs/cxxsupp/libcxxrt
@@ -23,10 +23,14 @@ NO_COMPILER_WARNINGS()
NO_RUNTIME()
-CXXFLAGS(-nostdinc++)
+CXXFLAGS(
+ -nostdinc++
+)
IF (CXX_UNWIND == "glibcxx_dynamic" OR ARCH_PPC64LE)
- LDFLAGS(-lgcc_s)
+ LDFLAGS(
+ -lgcc_s
+ )
ELSE()
PEERDIR(
contrib/libs/libunwind
@@ -35,6 +39,7 @@ ENDIF()
IF (SANITIZER_TYPE == undefined OR FUZZING)
NO_SANITIZE()
+
NO_SANITIZE_COVERAGE()
ENDIF()