| author | nkozlovskiy <nmk@ydb.tech> | 2023-12-04 19:26:35 +0300 |
|---|---|---|
| committer | nkozlovskiy <nmk@ydb.tech> | 2023-12-05 05:25:43 +0300 |
| commit | e62474f851635573f9f6631039e113a02fd50179 (patch) | |
| tree | 597d4bc8aad74ef42c55fd062398e93eceebfee3 /contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h | |
| parent | e7eddec34be4f360877b46ffa2b70fde8a3a5b8f (diff) | |
| download | ydb-e62474f851635573f9f6631039e113a02fd50179.tar.gz | |
ydb-oss sync: add clang16-rt/ to additionalPathsToCopy
Diffstat (limited to 'contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h')
| -rw-r--r-- | contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h | 145 |

1 file changed, 145 insertions, 0 deletions
```diff
diff --git a/contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h b/contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 0000000000..d88f5d7be6
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,145 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+  memory_order_relaxed = 0,
+  memory_order_consume = 1,
+  memory_order_acquire = 2,
+  memory_order_release = 3,
+  memory_order_acq_rel = 4,
+  memory_order_seq_cst = 5
+};
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
+
+struct atomic_u8 {
+  typedef u8 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+  typedef u16 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+  typedef s32 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+  typedef u32 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+  typedef u64 Type;
+  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+  alignas(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+  typedef uptr Type;
+  volatile Type ValDoNotUse;
+};
+
+template <typename T>
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  typename T::Type V;
+  __atomic_load(&A->ValDoNotUse, &V, MO);
+  return V;
+}
+
+template <typename T>
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+                                        memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+                                        memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  typename T::Type R;
+  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+  return R;
+}
+
+template <typename T>
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+                                           typename T::Type Xchg,
+                                           memory_order MO) {
+  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+                                   __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
+  return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+  atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+inline typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
```
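For context, the sketch below shows the usage pattern this helper API enables: a relaxed statistics-style increment via `atomic_fetch_add`, and a compare-and-swap retry loop of the kind `atomic_compare_exchange_strong` supports. It is not part of the commit; because the real header depends on `internal_defs.h` (for `u32`, `uptr`, and `DCHECK`), the snippet re-creates a minimal `atomic_u32` plus two hypothetical wrappers, `fetch_add` and `cas`, over the same GCC/Clang `__atomic_*` builtins the header uses, so that it compiles standalone.

```cpp
// Self-contained sketch of the usage pattern behind atomic_helpers.h.
// `atomic_u32`, `fetch_add`, and `cas` are hypothetical stand-ins mirroring
// scudo::atomic_u32 / atomic_fetch_add / atomic_compare_exchange_strong.
#include <cstdint>
#include <cstdio>

struct atomic_u32 {
  typedef uint32_t Type;
  volatile Type ValDoNotUse; // accessed only through the __atomic_* builtins
};

// Mirrors atomic_fetch_add: atomically adds V, returns the value *before* the add.
inline uint32_t fetch_add(volatile atomic_u32 *A, uint32_t V, int MO) {
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}

// Mirrors atomic_compare_exchange_strong: on failure, *Cmp is overwritten with
// the value actually observed in memory, so the caller can retry without reloading.
inline bool cas(volatile atomic_u32 *A, uint32_t *Cmp, uint32_t Xchg, int MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg,
                                   /*weak=*/false, MO, __ATOMIC_RELAXED);
}

int main() {
  atomic_u32 Counter = {0};

  // Relaxed increment: adequate for counters where only the total matters.
  fetch_add(&Counter, 1, __ATOMIC_RELAXED);

  // CAS retry loop: atomically double the counter. On each failed attempt,
  // the builtin refreshes Cur with the value another thread installed.
  uint32_t Cur = __atomic_load_n(&Counter.ValDoNotUse, __ATOMIC_RELAXED);
  while (!cas(&Counter, &Cur, Cur * 2, __ATOMIC_ACQUIRE)) {
  }

  std::printf("Counter = %u\n",
              __atomic_load_n(&Counter.ValDoNotUse, __ATOMIC_RELAXED));
  return 0;
}
```

The write-back into `*Cmp` on failure is also what makes the header's clutter-reducing `atomic_compare_exchange` work: it takes `Cmp` by value, performs one strong CAS with acquire ordering, and returns the value that was actually in memory, so equality with the passed-in `Cmp` signals success.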