author | vskipin <vskipin@yandex-team.ru> | 2022-02-10 16:46:00 +0300 |
---|---|---|
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:46:00 +0300 |
commit | 4d8b546b89b5afc08cf3667e176271c7ba935f33 (patch) | |
tree | 1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /util/system | |
parent | 4e4b78bd7b67e2533da4dbb9696374a6d6068e32 (diff) | |
Restoring authorship annotation for <vskipin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'util/system')
-rw-r--r-- | util/system/atomic.h | 4
-rw-r--r-- | util/system/atomic_gcc.h | 44
-rw-r--r-- | util/system/atomic_ops.h | 168
-rw-r--r-- | util/system/atomic_ut.cpp | 30
-rw-r--r-- | util/system/atomic_win.h | 14
-rw-r--r-- | util/system/sanitizers.cpp | 2
-rw-r--r-- | util/system/sanitizers.h | 94
-rw-r--r-- | util/system/sem.cpp | 6
-rw-r--r-- | util/system/spinlock.h | 12
9 files changed, 187 insertions, 187 deletions
diff --git a/util/system/atomic.h b/util/system/atomic.h
index 0de0ad3d54..80265babfd 100644
--- a/util/system/atomic.h
+++ b/util/system/atomic.h
@@ -2,8 +2,8 @@
 
 #include "defaults.h"
 
-using TAtomicBase = intptr_t;
-using TAtomic = volatile TAtomicBase;
+using TAtomicBase = intptr_t;
+using TAtomic = volatile TAtomicBase;
 
 #if defined(__GNUC__)
     #include "atomic_gcc.h"
diff --git a/util/system/atomic_gcc.h b/util/system/atomic_gcc.h
index 39d3987fdf..ed8dc2bdc5 100644
--- a/util/system/atomic_gcc.h
+++ b/util/system/atomic_gcc.h
@@ -5,32 +5,32 @@
         :            \
         : "memory")
 
-static inline TAtomicBase AtomicGet(const TAtomic& a) {
-    TAtomicBase tmp;
+static inline TAtomicBase AtomicGet(const TAtomic& a) {
+    TAtomicBase tmp;
 #if defined(_arm64_)
-    __asm__ __volatile__(
-        "ldar %x[value], %[ptr] \n\t"
-        : [value] "=r"(tmp)
-        : [ptr] "Q"(a)
-        : "memory");
+    __asm__ __volatile__(
+        "ldar %x[value], %[ptr] \n\t"
+        : [value] "=r"(tmp)
+        : [ptr] "Q"(a)
+        : "memory");
 #else
-    __atomic_load(&a, &tmp, __ATOMIC_ACQUIRE);
+    __atomic_load(&a, &tmp, __ATOMIC_ACQUIRE);
 #endif
-    return tmp;
-}
-
-static inline void AtomicSet(TAtomic& a, TAtomicBase v) {
+    return tmp;
+}
+
+static inline void AtomicSet(TAtomic& a, TAtomicBase v) {
 #if defined(_arm64_)
-    __asm__ __volatile__(
-        "stlr %x[value], %[ptr] \n\t"
-        : [ptr] "=Q"(a)
-        : [value] "r"(v)
-        : "memory");
-#else
-    __atomic_store(&a, &v, __ATOMIC_RELEASE);
-#endif
+    __asm__ __volatile__(
+        "stlr %x[value], %[ptr] \n\t"
+        : [ptr] "=Q"(a)
+        : [value] "r"(v)
+        : "memory");
+#else
+    __atomic_store(&a, &v, __ATOMIC_RELEASE);
+#endif
 }
-
+
 static inline intptr_t AtomicIncrement(TAtomic& p) {
     return __atomic_add_fetch(&p, 1, __ATOMIC_SEQ_CST);
 }
@@ -55,7 +55,7 @@ static inline intptr_t AtomicGetAndAdd(TAtomic& p, intptr_t v) {
     return __atomic_fetch_add(&p, v, __ATOMIC_SEQ_CST);
 }
 
-static inline intptr_t AtomicSwap(TAtomic* p, intptr_t v) {
+static inline intptr_t AtomicSwap(TAtomic* p, intptr_t v) {
     (void)p; // disable strange 'parameter set but not used' warning on gcc
     intptr_t ret;
     __atomic_exchange(p, &v, &ret, __ATOMIC_SEQ_CST);
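For readers mapping the GCC implementation above onto standard C++: AtomicGet is an acquire load, AtomicSet a release store (the ARM64 `ldar`/`stlr` paths spell the same semantics in assembly), and the read-modify-write helpers use sequentially consistent builtins. Below is a minimal illustrative sketch of the equivalent `std::atomic` calls — not part of the patch, and the `Std*` names are invented for the example:

```cpp
#include <atomic>
#include <cstdint>

// Illustrative equivalents of the primitives in atomic_gcc.h:
// __atomic_load(..., __ATOMIC_ACQUIRE)  ~ load(memory_order_acquire)
// __atomic_store(..., __ATOMIC_RELEASE) ~ store(memory_order_release)
// __atomic_add_fetch / __atomic_exchange with __ATOMIC_SEQ_CST
//                                        ~ fetch_add / exchange, seq_cst
inline intptr_t StdAtomicGet(const std::atomic<intptr_t>& a) {
    return a.load(std::memory_order_acquire);
}

inline void StdAtomicSet(std::atomic<intptr_t>& a, intptr_t v) {
    a.store(v, std::memory_order_release);
}

inline intptr_t StdAtomicIncrement(std::atomic<intptr_t>& a) {
    // add_fetch returns the new value, so add 1 to fetch_add's old value
    return a.fetch_add(1, std::memory_order_seq_cst) + 1;
}

inline intptr_t StdAtomicSwap(std::atomic<intptr_t>& a, intptr_t v) {
    return a.exchange(v, std::memory_order_seq_cst);
}
```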
diff --git a/util/system/atomic_ops.h b/util/system/atomic_ops.h
index 46a4a3a4ab..76766b4a0a 100644
--- a/util/system/atomic_ops.h
+++ b/util/system/atomic_ops.h
@@ -1,20 +1,20 @@
 #pragma once
 
-#include <util/generic/typetraits.h>
-
-template <typename T>
-inline TAtomic* AsAtomicPtr(T volatile* target) {
-    return reinterpret_cast<TAtomic*>(target);
-}
-
-template <typename T>
-inline const TAtomic* AsAtomicPtr(T const volatile* target) {
-    return reinterpret_cast<const TAtomic*>(target);
-}
-
-// integral types
-
-template <typename T>
+#include <util/generic/typetraits.h>
+
+template <typename T>
+inline TAtomic* AsAtomicPtr(T volatile* target) {
+    return reinterpret_cast<TAtomic*>(target);
+}
+
+template <typename T>
+inline const TAtomic* AsAtomicPtr(T const volatile* target) {
+    return reinterpret_cast<const TAtomic*>(target);
+}
+
+// integral types
+
+template <typename T>
 struct TAtomicTraits {
     enum {
         Castable = std::is_integral<T>::value && sizeof(T) == sizeof(TAtomicBase) && !std::is_const<T>::value,
@@ -25,18 +25,18 @@ template <typename T, typename TT>
 using TEnableIfCastable = std::enable_if_t<TAtomicTraits<T>::Castable, TT>;
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicGet(T const volatile& target) {
-    return static_cast<T>(AtomicGet(*AsAtomicPtr(&target)));
+inline TEnableIfCastable<T, T> AtomicGet(T const volatile& target) {
+    return static_cast<T>(AtomicGet(*AsAtomicPtr(&target)));
 }
 
 template <typename T>
-inline TEnableIfCastable<T, void> AtomicSet(T volatile& target, TAtomicBase value) {
-    AtomicSet(*AsAtomicPtr(&target), value);
+inline TEnableIfCastable<T, void> AtomicSet(T volatile& target, TAtomicBase value) {
+    AtomicSet(*AsAtomicPtr(&target), value);
 }
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicIncrement(T volatile& target) {
-    return static_cast<T>(AtomicIncrement(*AsAtomicPtr(&target)));
+inline TEnableIfCastable<T, T> AtomicIncrement(T volatile& target) {
+    return static_cast<T>(AtomicIncrement(*AsAtomicPtr(&target)));
 }
 
 template <typename T>
@@ -45,8 +45,8 @@ inline TEnableIfCastable<T, T> AtomicGetAndIncrement(T volatile& target) {
 }
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicDecrement(T volatile& target) {
-    return static_cast<T>(AtomicDecrement(*AsAtomicPtr(&target)));
+inline TEnableIfCastable<T, T> AtomicDecrement(T volatile& target) {
+    return static_cast<T>(AtomicDecrement(*AsAtomicPtr(&target)));
 }
 
 template <typename T>
@@ -55,8 +55,8 @@ inline TEnableIfCastable<T, T> AtomicGetAndDecrement(T volatile& target) {
 }
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicAdd(T volatile& target, TAtomicBase value) {
-    return static_cast<T>(AtomicAdd(*AsAtomicPtr(&target), value));
+inline TEnableIfCastable<T, T> AtomicAdd(T volatile& target, TAtomicBase value) {
+    return static_cast<T>(AtomicAdd(*AsAtomicPtr(&target), value));
 }
 
 template <typename T>
@@ -65,8 +65,8 @@ inline TEnableIfCastable<T, T> AtomicGetAndAdd(T volatile& target, TAtomicBase v
 }
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicSub(T volatile& target, TAtomicBase value) {
-    return static_cast<T>(AtomicSub(*AsAtomicPtr(&target), value));
+inline TEnableIfCastable<T, T> AtomicSub(T volatile& target, TAtomicBase value) {
+    return static_cast<T>(AtomicSub(*AsAtomicPtr(&target), value));
 }
 
 template <typename T>
@@ -75,12 +75,12 @@ inline TEnableIfCastable<T, T> AtomicGetAndSub(T volatile& target, TAtomicBase v
 }
 
 template <typename T>
-inline TEnableIfCastable<T, T> AtomicSwap(T volatile* target, TAtomicBase exchange) {
-    return static_cast<T>(AtomicSwap(AsAtomicPtr(target), exchange));
+inline TEnableIfCastable<T, T> AtomicSwap(T volatile* target, TAtomicBase exchange) {
+    return static_cast<T>(AtomicSwap(AsAtomicPtr(target), exchange));
 }
 
 template <typename T>
-inline TEnableIfCastable<T, bool> AtomicCas(T volatile* target, TAtomicBase exchange, TAtomicBase compare) {
+inline TEnableIfCastable<T, bool> AtomicCas(T volatile* target, TAtomicBase exchange, TAtomicBase compare) {
     return AtomicCas(AsAtomicPtr(target), exchange, compare);
 }
 
@@ -90,12 +90,12 @@ inline TEnableIfCastable<T, T> AtomicGetAndCas(T volatile* target, TAtomicBase e
 }
 
 template <typename T>
-inline TEnableIfCastable<T, bool> AtomicTryLock(T volatile* target) {
+inline TEnableIfCastable<T, bool> AtomicTryLock(T volatile* target) {
     return AtomicTryLock(AsAtomicPtr(target));
 }
 
 template <typename T>
-inline TEnableIfCastable<T, bool> AtomicTryAndTryLock(T volatile* target) {
+inline TEnableIfCastable<T, bool> AtomicTryAndTryLock(T volatile* target) {
     return AtomicTryAndTryLock(AsAtomicPtr(target));
 }
 
@@ -103,85 +103,85 @@ template <typename T>
 inline TEnableIfCastable<T, void> AtomicUnlock(T volatile* target) {
     AtomicUnlock(AsAtomicPtr(target));
 }
-
-template <typename T>
-inline TEnableIfCastable<T, T> AtomicOr(T volatile& target, TAtomicBase value) {
-    return static_cast<T>(AtomicOr(*AsAtomicPtr(&target), value));
-}
-
-template <typename T>
-inline TEnableIfCastable<T, T> AtomicAnd(T volatile& target, TAtomicBase value) {
-    return static_cast<T>(AtomicAnd(*AsAtomicPtr(&target), value));
-}
-
+
+template <typename T>
+inline TEnableIfCastable<T, T> AtomicOr(T volatile& target, TAtomicBase value) {
+    return static_cast<T>(AtomicOr(*AsAtomicPtr(&target), value));
+}
+
+template <typename T>
+inline TEnableIfCastable<T, T> AtomicAnd(T volatile& target, TAtomicBase value) {
+    return static_cast<T>(AtomicAnd(*AsAtomicPtr(&target), value));
+}
+
 template <typename T>
 inline TEnableIfCastable<T, T> AtomicXor(T volatile& target, TAtomicBase value) {
     return static_cast<T>(AtomicXor(*AsAtomicPtr(&target), value));
 }
 
-// pointer types
-
-template <typename T>
-inline T* AtomicGet(T* const volatile& target) {
-    return reinterpret_cast<T*>(AtomicGet(*AsAtomicPtr(&target)));
-}
-
-template <typename T>
-inline void AtomicSet(T* volatile& target, T* value) {
-    AtomicSet(*AsAtomicPtr(&target), reinterpret_cast<TAtomicBase>(value));
-}
-
+// pointer types
+
+template <typename T>
+inline T* AtomicGet(T* const volatile& target) {
+    return reinterpret_cast<T*>(AtomicGet(*AsAtomicPtr(&target)));
+}
+
+template <typename T>
+inline void AtomicSet(T* volatile& target, T* value) {
+    AtomicSet(*AsAtomicPtr(&target), reinterpret_cast<TAtomicBase>(value));
+}
+
 using TNullPtr = decltype(nullptr);
 
-template <typename T>
+template <typename T>
 inline void AtomicSet(T* volatile& target, TNullPtr) {
-    AtomicSet(*AsAtomicPtr(&target), 0);
-}
-
-template <typename T>
-inline T* AtomicSwap(T* volatile* target, T* exchange) {
-    return reinterpret_cast<T*>(AtomicSwap(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange)));
-}
-
-template <typename T>
+    AtomicSet(*AsAtomicPtr(&target), 0);
+}
+
+template <typename T>
+inline T* AtomicSwap(T* volatile* target, T* exchange) {
+    return reinterpret_cast<T*>(AtomicSwap(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange)));
+}
+
+template <typename T>
 inline T* AtomicSwap(T* volatile* target, TNullPtr) {
-    return reinterpret_cast<T*>(AtomicSwap(AsAtomicPtr(target), 0));
-}
-
-template <typename T>
-inline bool AtomicCas(T* volatile* target, T* exchange, T* compare) {
-    return AtomicCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), reinterpret_cast<TAtomicBase>(compare));
-}
-
-template <typename T>
+    return reinterpret_cast<T*>(AtomicSwap(AsAtomicPtr(target), 0));
+}
+
+template <typename T>
+inline bool AtomicCas(T* volatile* target, T* exchange, T* compare) {
+    return AtomicCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), reinterpret_cast<TAtomicBase>(compare));
+}
+
+template <typename T>
 inline T* AtomicGetAndCas(T* volatile* target, T* exchange, T* compare) {
     return reinterpret_cast<T*>(AtomicGetAndCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), reinterpret_cast<TAtomicBase>(compare)));
 }
 
 template <typename T>
 inline bool AtomicCas(T* volatile* target, T* exchange, TNullPtr) {
-    return AtomicCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), 0);
-}
-
-template <typename T>
+    return AtomicCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), 0);
+}
+
+template <typename T>
 inline T* AtomicGetAndCas(T* volatile* target, T* exchange, TNullPtr) {
     return reinterpret_cast<T*>(AtomicGetAndCas(AsAtomicPtr(target), reinterpret_cast<TAtomicBase>(exchange), 0));
 }
 
 template <typename T>
 inline bool AtomicCas(T* volatile* target, TNullPtr, T* compare) {
-    return AtomicCas(AsAtomicPtr(target), 0, reinterpret_cast<TAtomicBase>(compare));
-}
-
-template <typename T>
+    return AtomicCas(AsAtomicPtr(target), 0, reinterpret_cast<TAtomicBase>(compare));
+}
+
+template <typename T>
 inline T* AtomicGetAndCas(T* volatile* target, TNullPtr, T* compare) {
     return reinterpret_cast<T*>(AtomicGetAndCas(AsAtomicPtr(target), 0, reinterpret_cast<TAtomicBase>(compare)));
 }
 
 template <typename T>
 inline bool AtomicCas(T* volatile* target, TNullPtr, TNullPtr) {
-    return AtomicCas(AsAtomicPtr(target), 0, 0);
-}
+    return AtomicCas(AsAtomicPtr(target), 0, 0);
+}
 
 template <typename T>
 inline T* AtomicGetAndCas(T* volatile* target, TNullPtr, TNullPtr) {
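atomic_ops.h layers type-safe wrappers over the raw TAtomic primitives: integral types the same width as TAtomicBase dispatch through AsAtomicPtr(), and the pointer overloads add the reinterpret_casts, with TNullPtr overloads accepting a literal nullptr. A minimal usage sketch under those constraints — illustrative only, and it assumes a 64-bit target where sizeof(long) == sizeof(TAtomicBase) so TAtomicTraits admits the type:

```cpp
#include <util/system/atomic.h> // assumed to pull in the atomic_ops.h wrappers

struct TNode {
    int Value;
};

long Demo() {
    // Integral overloads: only types castable to TAtomicBase are accepted.
    volatile long counter = 0;
    AtomicSet(counter, 5);
    AtomicAdd(counter, 2);      // returns the new value, counter == 7
    AtomicCas(&counter, 10, 7); // CAS succeeds, counter == 10

    // Pointer overloads: nullptr routes through the TNullPtr overloads.
    TNode node{42};
    TNode* volatile head = nullptr;
    if (AtomicCas(&head, &node, nullptr)) {
        // head now points at node; swap it back out.
        AtomicSwap(&head, nullptr);
    }
    return AtomicGet(counter); // acquire load, 10
}
```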
diff --git a/util/system/atomic_ut.cpp b/util/system/atomic_ut.cpp
index c53722766a..07211ffba7 100644
--- a/util/system/atomic_ut.cpp
+++ b/util/system/atomic_ut.cpp
@@ -145,13 +145,13 @@ private:
     inline void TestAtomicSwap() {
         TAtomic v = 0;
 
-        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, 3), 0);
-        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, 5), 3);
-        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, -7), 5);
-        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, Max<intptr_t>()), -7);
+        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, 3), 0);
+        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, 5), 3);
+        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, -7), 5);
+        UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&v, Max<intptr_t>()), -7);
         UNIT_ASSERT_VALUES_EQUAL(v, Max<intptr_t>());
     }
-
+
     inline void TestAtomicOr() {
         TAtomic v = 0xf0;
 
@@ -173,19 +173,19 @@ private:
         UNIT_ASSERT_VALUES_EQUAL(AtomicXor(v, 0xff), 0x00);
     }
 
-    inline void TestAtomicPtr() {
-        int* p;
+    inline void TestAtomicPtr() {
+        int* p;
         AtomicSet(p, nullptr);
-
-        UNIT_ASSERT_VALUES_EQUAL(AtomicGet(p), 0);
-
-        int i;
-        AtomicSet(p, &i);
-
-        UNIT_ASSERT_VALUES_EQUAL(AtomicGet(p), &i);
+
+        UNIT_ASSERT_VALUES_EQUAL(AtomicGet(p), 0);
+
+        int i;
+        AtomicSet(p, &i);
+
+        UNIT_ASSERT_VALUES_EQUAL(AtomicGet(p), &i);
         UNIT_ASSERT_VALUES_EQUAL(AtomicSwap(&p, nullptr), &i);
         UNIT_ASSERT(AtomicCas(&p, &i, nullptr));
-    }
+    }
 };
 
 UNIT_TEST_SUITE_REGISTRATION(TAtomicTest<TAtomic>);
diff --git a/util/system/atomic_win.h b/util/system/atomic_win.h
index 124abbc698..65c290e6cc 100644
--- a/util/system/atomic_win.h
+++ b/util/system/atomic_win.h
@@ -2,8 +2,8 @@
 
 #include <intrin.h>
 
-#define USE_GENERIC_SETGET
-
+#define USE_GENERIC_SETGET
+
 #if defined(_i386_)
 
     #pragma intrinsic(_InterlockedIncrement)
@@ -36,8 +36,8 @@ static inline intptr_t AtomicGetAndAdd(TAtomic& a, intptr_t b) {
     return _InterlockedExchangeAdd((volatile long*)&a, b);
 }
 
-static inline intptr_t AtomicSwap(TAtomic* a, intptr_t b) {
-    return _InterlockedExchange((volatile long*)a, b);
+static inline intptr_t AtomicSwap(TAtomic* a, intptr_t b) {
+    return _InterlockedExchange((volatile long*)a, b);
 }
 
 static inline bool AtomicCas(TAtomic* a, intptr_t exchange, intptr_t compare) {
@@ -80,8 +80,8 @@ static inline intptr_t AtomicGetAndAdd(TAtomic& a, intptr_t b) {
     return _InterlockedExchangeAdd64((volatile __int64*)&a, b);
 }
 
-static inline intptr_t AtomicSwap(TAtomic* a, intptr_t b) {
-    return _InterlockedExchange64((volatile __int64*)a, b);
+static inline intptr_t AtomicSwap(TAtomic* a, intptr_t b) {
+    return _InterlockedExchange64((volatile __int64*)a, b);
 }
 
 static inline bool AtomicCas(TAtomic* a, intptr_t exchange, intptr_t compare) {
@@ -110,5 +110,5 @@ static inline intptr_t AtomicXor(TAtomic& a, intptr_t b) {
 static inline void AtomicBarrier() {
     TAtomic val = 0;
 
-    AtomicSwap(&val, 0);
+    AtomicSwap(&val, 0);
 }
diff --git a/util/system/sanitizers.cpp b/util/system/sanitizers.cpp
index b17e60a094..bb799a9e2e 100644
--- a/util/system/sanitizers.cpp
+++ b/util/system/sanitizers.cpp
@@ -1,4 +1,4 @@
-#include "sanitizers.h"
+#include "sanitizers.h"
 #include "thread.h"
 
 #if defined(_asan_enabled_)
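On Windows the same API maps onto the `_Interlocked*` intrinsics in 32- and 64-bit variants, and AtomicBarrier() derives full-fence semantics from a dummy AtomicSwap on a local. For comparison, a one-line illustrative sketch of the same intent in standard C++ (the `Std*` name is invented for the example):

```cpp
#include <atomic>

// AtomicBarrier() above swaps a throwaway TAtomic to force a full fence;
// the standard-library spelling of that intent is a seq_cst thread fence.
inline void StdAtomicBarrier() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
}
```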
"sanitizers.h" +#include "sanitizers.h" #include "thread.h" #if defined(_asan_enabled_) diff --git a/util/system/sanitizers.h b/util/system/sanitizers.h index b7d5a72f6f..965e5c751e 100644 --- a/util/system/sanitizers.h +++ b/util/system/sanitizers.h @@ -1,22 +1,22 @@ #pragma once -#include "defaults.h" +#include "defaults.h" + +extern "C" { // sanitizers API -extern "C" { // sanitizers API - #if defined(_asan_enabled_) void __lsan_ignore_object(const void* p); #endif -#if defined(_msan_enabled_) +#if defined(_msan_enabled_) void __msan_unpoison(const volatile void* a, size_t size); void __msan_poison(const volatile void* a, size_t size); void __msan_check_mem_is_initialized(const volatile void* x, size_t size); #endif -}; // sanitizers API - -namespace NSan { +}; // sanitizers API + +namespace NSan { class TFiberContext { public: TFiberContext() noexcept; @@ -54,63 +54,63 @@ namespace NSan { #endif } - // Determines if asan present + // Determines if asan present inline constexpr static bool ASanIsOn() noexcept { -#if defined(_asan_enabled_) - return true; -#else - return false; +#if defined(_asan_enabled_) + return true; +#else + return false; #endif - } + } - // Determines if tsan present + // Determines if tsan present inline constexpr static bool TSanIsOn() noexcept { -#if defined(_tsan_enabled_) - return true; -#else - return false; +#if defined(_tsan_enabled_) + return true; +#else + return false; #endif - } + } - // Determines if msan present + // Determines if msan present inline constexpr static bool MSanIsOn() noexcept { -#if defined(_msan_enabled_) - return true; -#else - return false; +#if defined(_msan_enabled_) + return true; +#else + return false; #endif - } + } - // Make memory region fully initialized (without changing its contents). + // Make memory region fully initialized (without changing its contents). inline static void Unpoison(const volatile void* a, size_t size) noexcept { -#if defined(_msan_enabled_) - __msan_unpoison(a, size); -#else - Y_UNUSED(a); - Y_UNUSED(size); -#endif - } - - // Make memory region fully uninitialized (without changing its contents). - // This is a legacy interface that does not update origin information. Use __msan_allocated_memory() instead. +#if defined(_msan_enabled_) + __msan_unpoison(a, size); +#else + Y_UNUSED(a); + Y_UNUSED(size); +#endif + } + + // Make memory region fully uninitialized (without changing its contents). + // This is a legacy interface that does not update origin information. Use __msan_allocated_memory() instead. inline static void Poison(const volatile void* a, size_t size) noexcept { -#if defined(_msan_enabled_) - __msan_poison(a, size); +#if defined(_msan_enabled_) + __msan_poison(a, size); #else - Y_UNUSED(a); - Y_UNUSED(size); + Y_UNUSED(a); + Y_UNUSED(size); #endif - } + } - // Checks that memory range is fully initialized, and reports an error if it is not. + // Checks that memory range is fully initialized, and reports an error if it is not. 
     inline static void CheckMemIsInitialized(const volatile void* a, size_t size) noexcept {
-#if defined(_msan_enabled_)
-        __msan_check_mem_is_initialized(a, size);
+#if defined(_msan_enabled_)
+        __msan_check_mem_is_initialized(a, size);
 #else
-        Y_UNUSED(a);
-        Y_UNUSED(size);
+        Y_UNUSED(a);
+        Y_UNUSED(size);
 #endif
-    }
+    }
 
     inline static void MarkAsIntentionallyLeaked(const void* ptr) noexcept {
 #if defined(_asan_enabled_)
diff --git a/util/system/sem.cpp b/util/system/sem.cpp
index 5ce3b1e529..4a93b903b5 100644
--- a/util/system/sem.cpp
+++ b/util/system/sem.cpp
@@ -133,8 +133,8 @@ namespace {
 #endif
         }
 
-        //The UNIX semaphore object does not support a timed "wait", and
-        //hence to maintain consistancy, for win32 case we use INFINITE or 0 timeout.
+        //The UNIX semaphore object does not support a timed "wait", and
+        //hence to maintain consistancy, for win32 case we use INFINITE or 0 timeout.
         inline void Acquire() noexcept {
 #ifdef _win_
             Y_VERIFY(::WaitForSingleObject(Handle, INFINITE) == WAIT_OBJECT_0, "can not acquire semaphore");
@@ -154,7 +154,7 @@ namespace {
             // zero-second time-out interval
             // WAIT_OBJECT_0: current free count > 0
             // WAIT_TIMEOUT: current free count == 0
-            return ::WaitForSingleObject(Handle, 0) == WAIT_OBJECT_0;
+            return ::WaitForSingleObject(Handle, 0) == WAIT_OBJECT_0;
 #else
     #ifdef USE_SYSV_SEMAPHORES
             struct sembuf ops[] = {{0, -1, SEM_UNDO | IPC_NOWAIT}};
diff --git a/util/system/spinlock.h b/util/system/spinlock.h
index cd8c6f26a3..af2630890a 100644
--- a/util/system/spinlock.h
+++ b/util/system/spinlock.h
@@ -27,13 +27,13 @@ protected:
 };
 
 static inline void SpinLockPause() {
-#if defined(__GNUC__)
-    #if defined(_i386_) || defined(_x86_64_)
+#if defined(__GNUC__)
+    #if defined(_i386_) || defined(_x86_64_)
     __asm __volatile("pause");
-    #elif defined(_arm64_)
-    __asm __volatile("yield" ::
-                         : "memory");
-    #endif
+    #elif defined(_arm64_)
+    __asm __volatile("yield" ::
+                         : "memory");
+    #endif
 #endif
 }
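Taken together, these pieces are what the spinlocks in util build on: AtomicTryLock/AtomicTryAndTryLock from atomic_ops.h perform the locking CAS, and SpinLockPause() (the `pause`/`yield` hint above) keeps the busy-wait polite. A sketch of the resulting test-and-test-and-set loop, assuming the usual util semantics (AtomicTryLock is a 0 -> 1 CAS, and AtomicTryAndTryLock reads the value first to avoid cache-line ping-pong); TSimpleSpinLock is an invented name for the example:

```cpp
#include <util/system/atomic.h>
#include <util/system/spinlock.h>

// Minimal test-and-test-and-set lock in the style these headers support.
class TSimpleSpinLock {
public:
    void Acquire() noexcept {
        // AtomicTryAndTryLock peeks at the value before attempting the CAS,
        // so a contended lock spins on a shared read instead of bus-locking.
        while (!AtomicTryAndTryLock(&Lock_)) {
            SpinLockPause(); // "pause" on x86, "yield" on arm64, no-op elsewhere
        }
    }

    void Release() noexcept {
        AtomicUnlock(&Lock_); // release the lock by storing 0
    }

private:
    TAtomic Lock_ = 0;
};
```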