author    robot-contrib <robot-contrib@yandex-team.com>  2022-08-07 16:30:29 +0300
committer robot-contrib <robot-contrib@yandex-team.com>  2022-08-07 16:30:29 +0300
commit    a18d511fddf3023584cf873456d21c895fd82d7e (patch)
tree      35da32f31ebcd45b96e8b750af5a8b85e834c6ec
parent    8b163f003d8db5f6520630da7e2e6b393bfb2b5c (diff)
download  ydb-a18d511fddf3023584cf873456d21c895fd82d7e.tar.gz
Update contrib/restricted/boost/atomic to 1.79.0
-rw-r--r--  contrib/restricted/boost/atomic/CMakeLists.txt | 11
-rw-r--r--  contrib/restricted/boost/atomic/README.md | 16
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic.hpp | 9
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/atomic.hpp | 195
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/atomic_flag.hpp | 15
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/atomic_ref.hpp | 98
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/capabilities.hpp | 193
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/addressof.hpp | 9
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/aligned_variable.hpp | 57
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag.hpp | 71
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag_impl.hpp | 129
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_impl.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_template.hpp) | 929
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_ref_impl.hpp | 1226
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_cast.hpp | 110
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_fp_cast.hpp | 54
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/capabilities.hpp | 217
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp | 46
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp | 58
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_alpha.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp) | 8
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_arm.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp) | 45
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_ppc.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp) | 25
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_sparc.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp) | 8
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_x86.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp) | 28
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_arm.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp) | 8
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_x86.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp) | 18
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp | 39
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp | 175
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp | 37
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp | 7
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp | 40
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/cas_based_exchange.hpp | 50
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/classify.hpp | 90
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/config.hpp | 90
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations.hpp | 50
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations_fwd.hpp | 38
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp | 1121
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp | 1909
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_alpha.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp) | 569
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_arm.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp) | 914
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp) | 374
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_sparc.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp) | 91
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_x86.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp) | 527
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_arm.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp) | 120
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_x86.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp) | 170
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations.hpp | 49
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated.hpp | 195
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated_fwd.hpp | 38
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_fwd.hpp | 38
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_cas_based.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp) | 31
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_atomic.hpp | 306
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_sync.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp) | 127
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_linux_arm.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp) | 75
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_windows.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp) | 73
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extending_cas_based_arithmetic.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp) | 21
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_operations_fwd.hpp | 5
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_emulated.hpp | 35
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_generic.hpp | 23
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_operations_fwd.hpp | 5
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_emulated.hpp | 100
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp | 1060
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp | 1330
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp | 718
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp | 330
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp | 334
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_generic.hpp | 26
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp | 10
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp | 211
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_operations.hpp | 41
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp | 60
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp | 58
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp | 53
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp | 90
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp | 68
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp | 70
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp | 69
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp | 66
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp | 66
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations.hpp | 41
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations_emulated.hpp | 50
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_atomic.hpp | 75
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_sync.hpp | 53
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_linux_arm.hpp | 64
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_windows.hpp | 67
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/footer.hpp | 24
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_operations_fwd.hpp | 5
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_emulated.hpp | 22
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_generic.hpp | 9
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/futex.hpp | 154
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_arm_asm_common.hpp | 79
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_atomic_memory_order_utils.hpp | 66
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_ppc_asm_common.hpp | 33
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/header.hpp | 72
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/int_sizes.hpp | 26
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_conversions.hpp (renamed from contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_extend.hpp) | 13
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp | 46
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/intptr.hpp | 46
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/lock_pool.hpp | 151
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/lockpool.hpp | 51
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/memory_order_utils.hpp | 47
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/once_flag.hpp | 43
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/operations.hpp | 24
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_fwd.hpp | 35
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_lockfree.hpp | 30
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_emulated.hpp | 162
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch32_common.hpp | 53
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch64_common.hpp | 53
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp | 87
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp | 392
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp | 5
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp | 563
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp | 9
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/pause.hpp | 28
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/platform.hpp | 114
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_traits.hpp | 187
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_type.hpp | 207
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/alignment_of.hpp | 51
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/has_unique_object_representations.hpp | 143
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_enum.hpp | 42
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_floating_point.hpp | 5
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp | 46
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_trivially_copyable.hpp | 45
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_capabilities.hpp | 363
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_darwin_ulock.hpp | 58
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_dragonfly_umtx.hpp | 30
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_freebsd_umtx.hpp | 40
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_futex.hpp | 31
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_windows.hpp | 57
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_on_address.hpp | 65
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations.hpp | 28
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations_fwd.hpp | 43
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_darwin_ulock.hpp | 158
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_dragonfly_umtx.hpp | 75
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_emulated.hpp | 97
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_freebsd_umtx.hpp | 119
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_futex.hpp | 111
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_generic.hpp | 143
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_windows.hpp | 179
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/fences.hpp | 25
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic.hpp | 91
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_flag.hpp | 40
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_ref.hpp | 98
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/memory_order.hpp | 18
-rw-r--r--  contrib/restricted/boost/atomic/src/bit_operation_tools.hpp | 82
-rw-r--r--  contrib/restricted/boost/atomic/src/cpuid.hpp | 86
-rw-r--r--  contrib/restricted/boost/atomic/src/find_address.hpp | 45
-rw-r--r--  contrib/restricted/boost/atomic/src/find_address_sse2.cpp | 284
-rw-r--r--  contrib/restricted/boost/atomic/src/find_address_sse41.cpp | 154
-rw-r--r--  contrib/restricted/boost/atomic/src/lock_pool.cpp | 1414
-rw-r--r--  contrib/restricted/boost/atomic/src/lockpool.cpp | 167
-rw-r--r--  contrib/restricted/boost/atomic/src/x86_vector_tools.hpp | 52
-rw-r--r--  contrib/restricted/boost/winapi/include/boost/winapi/critical_section.hpp | 240
-rw-r--r--  contrib/restricted/boost/winapi/include/boost/winapi/detail/cast_ptr.hpp | 40
152 files changed, 18757 insertions(+), 4969 deletions(-)
diff --git a/contrib/restricted/boost/atomic/CMakeLists.txt b/contrib/restricted/boost/atomic/CMakeLists.txt
index f31790bc73..c97670ef7a 100644
--- a/contrib/restricted/boost/atomic/CMakeLists.txt
+++ b/contrib/restricted/boost/atomic/CMakeLists.txt
@@ -14,13 +14,22 @@ target_compile_options(restricted-boost-atomic PRIVATE
target_include_directories(restricted-boost-atomic PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/include
)
+target_include_directories(restricted-boost-atomic PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/winapi/include
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/src
+)
target_link_libraries(restricted-boost-atomic PUBLIC
contrib-libs-cxxsupp
yutil
+ restricted-boost-align
restricted-boost-assert
restricted-boost-config
+ restricted-boost-predef
+ restricted-boost-preprocessor
restricted-boost-type_traits
)
target_sources(restricted-boost-atomic PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/src/lockpool.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/src/find_address_sse2.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/src/find_address_sse41.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/boost/atomic/src/lock_pool.cpp
)
diff --git a/contrib/restricted/boost/atomic/README.md b/contrib/restricted/boost/atomic/README.md
index 3eb887534e..b64cbfbf5d 100644
--- a/contrib/restricted/boost/atomic/README.md
+++ b/contrib/restricted/boost/atomic/README.md
@@ -1,6 +1,6 @@
# ![Boost.Atomic](doc/logo.png)
-Boost.Atomic, part of collection of the [Boost C++ Libraries](http://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11.
+Boost.Atomic, part of the collection of [Boost C++ Libraries](https://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11 and later.
### Directories
@@ -12,15 +12,17 @@ Boost.Atomic, part of collection of the [Boost C++ Libraries](http://github.com/
### More information
-* [Documentation](http://boost.org/libs/atomic)
-* [Report bugs](https://svn.boost.org/trac/boost/newticket?component=atomic;version=Boost%20Release%20Branch). Be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well.
-* Submit your patches as pull requests against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
+* [Documentation](https://www.boost.org/libs/atomic)
+* [Report bugs](https://github.com/boostorg/atomic/issues/new). Be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well.
+* Submit your patches as [pull requests](https://github.com/boostorg/atomic/compare) against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](https://www.boost.org/LICENSE_1_0.txt).
### Build status
-Master: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/master?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/master) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=master)](https://travis-ci.org/boostorg/atomic)
-Develop: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/develop?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=develop)](https://travis-ci.org/boostorg/atomic)
+Branch | GitHub Actions | AppVeyor | Test Matrix | Dependencies |
+:-------------: | -------------- | -------- | ----------- | ------------ |
+[`master`](https://github.com/boostorg/atomic/tree/master) | [![GitHub Actions](https://github.com/boostorg/atomic/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/boostorg/atomic/actions?query=branch%3Amaster) | [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/master?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/master) | [![Tests](https://img.shields.io/badge/matrix-master-brightgreen.svg)](http://www.boost.org/development/tests/master/developer/atomic.html) | [![Dependencies](https://img.shields.io/badge/deps-master-brightgreen.svg)](https://pdimov.github.io/boostdep-report/master/atomic.html)
+[`develop`](https://github.com/boostorg/atomic/tree/develop) | [![GitHub Actions](https://github.com/boostorg/atomic/actions/workflows/ci.yml/badge.svg?branch=develop)](https://github.com/boostorg/atomic/actions?query=branch%3Adevelop) | [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/develop?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) | [![Tests](https://img.shields.io/badge/matrix-develop-brightgreen.svg)](http://www.boost.org/development/tests/develop/developer/atomic.html) | [![Dependencies](https://img.shields.io/badge/deps-develop-brightgreen.svg)](https://pdimov.github.io/boostdep-report/develop/atomic.html)
### License
-Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
+Distributed under the [Boost Software License, Version 1.0](https://www.boost.org/LICENSE_1_0.txt).
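
Usage sketch (illustrative, not part of the commit): the interface the README describes mirrors C++11 <atomic>, so basic use looks like this.

#include <boost/atomic.hpp>
#include <cassert>

int main()
{
    boost::atomic< int > counter(0);
    counter.fetch_add(1, boost::memory_order_relaxed);  // standard-style read-modify-write
    int expected = 1;
    // compare_exchange_strong follows the C++11 signature; the failure order
    // is deduced from the success order when not given explicitly
    bool ok = counter.compare_exchange_strong(expected, 2, boost::memory_order_acq_rel);
    assert(ok && counter.load(boost::memory_order_acquire) == 2);
    return 0;
}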
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic.hpp
index cc28b1ab5e..be239b3423 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic.hpp
@@ -2,6 +2,7 @@
#define BOOST_ATOMIC_HPP
// Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2020 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
@@ -9,7 +10,15 @@
// This header includes all Boost.Atomic public headers
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/atomic.hpp>
+#include <boost/atomic/atomic_ref.hpp>
+#include <boost/atomic/atomic_flag.hpp>
+#include <boost/atomic/ipc_atomic.hpp>
+#include <boost/atomic/ipc_atomic_ref.hpp>
+#include <boost/atomic/ipc_atomic_flag.hpp>
+#include <boost/atomic/fences.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
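#endif

Usage sketch (illustrative, not part of the commit): with the expanded umbrella header, atomics and fences come from a single include. The publication pattern below is an assumption chosen for illustration.

#include <boost/atomic.hpp>

int payload = 0;

void publish(boost::atomic< int >& flag)
{
    payload = 42;                                             // plain write
    boost::atomic_thread_fence(boost::memory_order_release);  // from fences.hpp, pulled in above
    flag.store(1, boost::memory_order_relaxed);               // fenced publication
}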
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/atomic.hpp
index 5a8058829c..8bd075a9a7 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/atomic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/atomic.hpp
@@ -5,33 +5,195 @@
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/atomic.hpp
*
- * This header contains definition of \c atomic template and \c atomic_flag.
+ * This header contains definition of \c atomic template.
*/
#ifndef BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
-#include <boost/atomic/fences.hpp>
-#include <boost/atomic/atomic_flag.hpp>
-#include <boost/atomic/detail/atomic_template.hpp>
-#include <boost/atomic/detail/operations.hpp>
-#include <boost/atomic/detail/extra_operations.hpp>
-#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-#include <boost/atomic/detail/fp_operations.hpp>
-#include <boost/atomic/detail/extra_fp_operations.hpp>
-#endif
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/classify.hpp>
+#include <boost/atomic/detail/atomic_impl.hpp>
+#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
+#include <boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
+namespace atomics {
+
+//! Atomic object
+template< typename T >
+class atomic :
+ public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type, false >
+{
+private:
+ typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type, false > base_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+ // Deprecated, use value_type instead
+ BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
+ typedef typename base_type::storage_type storage_type;
+
+ BOOST_STATIC_ASSERT_MSG(sizeof(value_type) > 0u, "boost::atomic<T> requires T to be a complete type");
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+ BOOST_STATIC_ASSERT_MSG(atomics::detail::is_trivially_copyable< value_type >::value, "boost::atomic<T> requires T to be a trivially copyable type");
+#endif
+
+public:
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT atomic() BOOST_NOEXCEPT_IF(atomics::detail::is_nothrow_default_constructible< value_type >::value) : base_type()
+ {
+ }
+
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE operator value_type() const volatile BOOST_NOEXCEPT
+ {
+ return this->load();
+ }
+
+ // Deprecated, use value() instead
+ BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
+ BOOST_FORCEINLINE typename base_type::storage_type& storage() BOOST_NOEXCEPT { return base_type::storage(); }
+ BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
+ BOOST_FORCEINLINE typename base_type::storage_type volatile& storage() volatile BOOST_NOEXCEPT { return base_type::storage(); }
+ BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
+ BOOST_FORCEINLINE typename base_type::storage_type const& storage() const BOOST_NOEXCEPT { return base_type::storage(); }
+ BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
+ BOOST_FORCEINLINE typename base_type::storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return base_type::storage(); }
+
+ BOOST_DELETED_FUNCTION(atomic(atomic const&))
+ BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))
+ BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)
+};
+
+typedef atomic< char > atomic_char;
+typedef atomic< unsigned char > atomic_uchar;
+typedef atomic< signed char > atomic_schar;
+typedef atomic< uint8_t > atomic_uint8_t;
+typedef atomic< int8_t > atomic_int8_t;
+typedef atomic< unsigned short > atomic_ushort;
+typedef atomic< short > atomic_short;
+typedef atomic< uint16_t > atomic_uint16_t;
+typedef atomic< int16_t > atomic_int16_t;
+typedef atomic< unsigned int > atomic_uint;
+typedef atomic< int > atomic_int;
+typedef atomic< uint32_t > atomic_uint32_t;
+typedef atomic< int32_t > atomic_int32_t;
+typedef atomic< unsigned long > atomic_ulong;
+typedef atomic< long > atomic_long;
+typedef atomic< uint64_t > atomic_uint64_t;
+typedef atomic< int64_t > atomic_int64_t;
+#ifdef BOOST_HAS_LONG_LONG
+typedef atomic< boost::ulong_long_type > atomic_ullong;
+typedef atomic< boost::long_long_type > atomic_llong;
+#endif
+typedef atomic< void* > atomic_address;
+typedef atomic< bool > atomic_bool;
+typedef atomic< wchar_t > atomic_wchar_t;
+#if defined(__cpp_char8_t) && __cpp_char8_t >= 201811
+typedef atomic< char8_t > atomic_char8_t;
+#endif
+#if !defined(BOOST_NO_CXX11_CHAR16_T)
+typedef atomic< char16_t > atomic_char16_t;
+#endif
+#if !defined(BOOST_NO_CXX11_CHAR32_T)
+typedef atomic< char32_t > atomic_char32_t;
+#endif
+
+typedef atomic< int_least8_t > atomic_int_least8_t;
+typedef atomic< uint_least8_t > atomic_uint_least8_t;
+typedef atomic< int_least16_t > atomic_int_least16_t;
+typedef atomic< uint_least16_t > atomic_uint_least16_t;
+typedef atomic< int_least32_t > atomic_int_least32_t;
+typedef atomic< uint_least32_t > atomic_uint_least32_t;
+typedef atomic< int_least64_t > atomic_int_least64_t;
+typedef atomic< uint_least64_t > atomic_uint_least64_t;
+typedef atomic< int_fast8_t > atomic_int_fast8_t;
+typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
+typedef atomic< int_fast16_t > atomic_int_fast16_t;
+typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
+typedef atomic< int_fast32_t > atomic_int_fast32_t;
+typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
+typedef atomic< int_fast64_t > atomic_int_fast64_t;
+typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
+typedef atomic< intmax_t > atomic_intmax_t;
+typedef atomic< uintmax_t > atomic_uintmax_t;
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+typedef atomic< float > atomic_float_t;
+typedef atomic< double > atomic_double_t;
+typedef atomic< long double > atomic_long_double_t;
+#endif
+
+typedef atomic< std::size_t > atomic_size_t;
+typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
+
+#if defined(BOOST_HAS_INTPTR_T)
+typedef atomic< boost::intptr_t > atomic_intptr_t;
+typedef atomic< boost::uintptr_t > atomic_uintptr_t;
+#endif
+
+// Select the lock-free atomic types that have natively supported waiting/notifying operations.
+// Prefer 32-bit types first, as those have the best performance on current 32-bit and 64-bit architectures.
+#if BOOST_ATOMIC_INT32_LOCK_FREE == 2 && BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY == 2
+typedef atomic< uint32_t > atomic_unsigned_lock_free;
+typedef atomic< int32_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT64_LOCK_FREE == 2 && BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY == 2
+typedef atomic< uint64_t > atomic_unsigned_lock_free;
+typedef atomic< int64_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT16_LOCK_FREE == 2 && BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY == 2
+typedef atomic< uint16_t > atomic_unsigned_lock_free;
+typedef atomic< int16_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT8_LOCK_FREE == 2 && BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY == 2
+typedef atomic< uint8_t > atomic_unsigned_lock_free;
+typedef atomic< int8_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT32_LOCK_FREE == 2
+typedef atomic< uint32_t > atomic_unsigned_lock_free;
+typedef atomic< int32_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT64_LOCK_FREE == 2
+typedef atomic< uint64_t > atomic_unsigned_lock_free;
+typedef atomic< int64_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT16_LOCK_FREE == 2
+typedef atomic< uint16_t > atomic_unsigned_lock_free;
+typedef atomic< int16_t > atomic_signed_lock_free;
+#elif BOOST_ATOMIC_INT8_LOCK_FREE == 2
+typedef atomic< uint8_t > atomic_unsigned_lock_free;
+typedef atomic< int8_t > atomic_signed_lock_free;
+#else
+#define BOOST_ATOMIC_DETAIL_NO_LOCK_FREE_TYPEDEFS
+#endif
+
+} // namespace atomics
using atomics::atomic;
@@ -59,6 +221,9 @@ using atomics::atomic_llong;
using atomics::atomic_address;
using atomics::atomic_bool;
using atomics::atomic_wchar_t;
+#if defined(__cpp_char8_t) && __cpp_char8_t >= 201811
+using atomics::atomic_char8_t;
+#endif
#if !defined(BOOST_NO_CXX11_CHAR16_T)
using atomics::atomic_char16_t;
#endif
@@ -99,6 +264,14 @@ using atomics::atomic_intptr_t;
using atomics::atomic_uintptr_t;
#endif
+#if !defined(BOOST_ATOMIC_DETAIL_NO_LOCK_FREE_TYPEDEFS)
+using atomics::atomic_unsigned_lock_free;
+using atomics::atomic_signed_lock_free;
+#endif
+#undef BOOST_ATOMIC_DETAIL_NO_LOCK_FREE_TYPEDEFS
+
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
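
Usage sketch (illustrative, not part of the commit): the #elif chain above prefers a lock-free integer with native wait/notify support when defining atomic_unsigned_lock_free and atomic_signed_lock_free. The ticket-turnstile shape below is an assumed example of why that preference matters.

#include <boost/atomic.hpp>

// Resolves to atomic< uint32_t > on most current targets per the chain above;
// the typedefs are omitted entirely when no lock-free candidate exists.
boost::atomic_unsigned_lock_free turn(0u);

void wait_for_turn(boost::atomic_unsigned_lock_free::value_type my_ticket)
{
    boost::atomic_unsigned_lock_free::value_type cur;
    while ((cur = turn.load(boost::memory_order_acquire)) != my_ticket)
        turn.wait(cur, boost::memory_order_acquire);  // blocks via native wait where available
}

void finish_turn()
{
    turn.fetch_add(1u, boost::memory_order_release);
    turn.notify_all();
}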
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/atomic_flag.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/atomic_flag.hpp
index ac296bcc8e..cab6179ab4 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/atomic_flag.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/atomic_flag.hpp
@@ -5,7 +5,7 @@
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/atomic_flag.hpp
@@ -17,17 +17,26 @@
#define BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
#include <boost/atomic/capabilities.hpp>
-#include <boost/atomic/detail/operations.hpp>
-#include <boost/atomic/detail/atomic_flag.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/atomic_flag_impl.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
+namespace atomics {
+
+//! Atomic flag
+typedef atomics::detail::atomic_flag_impl< false > atomic_flag;
+
+} // namespace atomics
using atomics::atomic_flag;
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
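
Usage sketch (illustrative, not part of the commit): atomic_flag keeps its classic role as the building block for a test-and-set spinlock; BOOST_ATOMIC_FLAG_INIT is unavailable only when BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT is defined.

#include <boost/atomic/atomic_flag.hpp>

boost::atomic_flag lock_flag = BOOST_ATOMIC_FLAG_INIT;

void lock()
{
    while (lock_flag.test_and_set(boost::memory_order_acquire))
    {
        // spin until the current holder clears the flag
    }
}

void unlock()
{
    lock_flag.clear(boost::memory_order_release);
}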
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/atomic_ref.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/atomic_ref.hpp
new file mode 100644
index 0000000000..02090d51fb
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/atomic_ref.hpp
@@ -0,0 +1,98 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020-2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/atomic_ref.hpp
+ *
+ * This header contains definition of \c atomic_ref template.
+ */
+
+#ifndef BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
+#define BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
+
+#include <boost/assert.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/classify.hpp>
+#include <boost/atomic/detail/atomic_ref_impl.hpp>
+#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+
+//! Atomic reference to external object
+template< typename T >
+class atomic_ref :
+ public atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, false >
+{
+private:
+ typedef atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, false > base_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+ BOOST_STATIC_ASSERT_MSG(sizeof(value_type) > 0u, "boost::atomic_ref<T> requires T to be a complete type");
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+ BOOST_STATIC_ASSERT_MSG(atomics::detail::is_trivially_copyable< value_type >::value, "boost::atomic_ref<T> requires T to be a trivially copyable type");
+#endif
+
+private:
+ typedef typename base_type::storage_type storage_type;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(atomic_ref(atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ // Check that referenced object alignment satisfies required alignment
+ BOOST_ASSERT((((atomics::detail::uintptr_t)this->m_value) & (base_type::required_alignment - 1u)) == 0u);
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) const BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE operator value_type() const BOOST_NOEXCEPT
+ {
+ return this->load();
+ }
+
+ BOOST_DELETED_FUNCTION(atomic_ref& operator= (atomic_ref const&))
+};
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX17_DEDUCTION_GUIDES)
+template< typename T >
+atomic_ref(T&) -> atomic_ref< T >;
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX17_DEDUCTION_GUIDES)
+
+//! Atomic reference factory function
+template< typename T >
+BOOST_FORCEINLINE atomic_ref< T > make_atomic_ref(T& value) BOOST_NOEXCEPT
+{
+ return atomic_ref< T >(value);
+}
+
+} // namespace atomics
+
+using atomics::atomic_ref;
+using atomics::make_atomic_ref;
+
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
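
Usage sketch (illustrative, not part of the commit): atomic_ref applies atomic operations to an external, suitably aligned object that must outlive every reference to it.

#include <boost/atomic/atomic_ref.hpp>

int shared_value = 0;  // must stay alive and aligned while referenced

void bump()
{
    boost::atomic_ref< int > ref(shared_value);  // C++17 code can rely on the deduction guide instead
    ref.fetch_add(1, boost::memory_order_relaxed);
}

void bump_via_factory()
{
    // make_atomic_ref is the pre-C++17 alternative to the deduction guide
    boost::make_atomic_ref(shared_value).fetch_add(1, boost::memory_order_relaxed);
}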
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/capabilities.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/capabilities.hpp
index 5c7434d9bd..a17bde4cfc 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/capabilities.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/capabilities.hpp
@@ -15,196 +15,7 @@
#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/platform.hpp>
-#include <boost/atomic/detail/int_sizes.hpp>
-#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-#include <boost/atomic/detail/float_sizes.hpp>
-#endif
-
-#if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/caps_)
-#endif
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
-#define BOOST_ATOMIC_INT8_LOCK_FREE 0
-#endif
-
-#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
-#define BOOST_ATOMIC_INT16_LOCK_FREE 0
-#endif
-
-#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
-#define BOOST_ATOMIC_INT32_LOCK_FREE 0
-#endif
-
-#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
-#define BOOST_ATOMIC_INT64_LOCK_FREE 0
-#endif
-
-#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
-#define BOOST_ATOMIC_INT128_LOCK_FREE 0
-#endif
-
-
-#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
-#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#endif
-
-#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
-#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#endif
-
-#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
-#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#endif
-
-#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
-#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#else
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
-#endif
-#endif
-
-#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
-#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
-#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
-#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
-#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
-#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#else
-#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
-#endif
-#endif
-
-#ifndef BOOST_ATOMIC_INT_LOCK_FREE
-#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
-#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
-#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
-#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
-#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#else
-#define BOOST_ATOMIC_INT_LOCK_FREE 0
-#endif
-#endif
-
-#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
-#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
-#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
-#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
-#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
-#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#else
-#define BOOST_ATOMIC_LONG_LOCK_FREE 0
-#endif
-#endif
-
-#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
-#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
-#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
-#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
-#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
-#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#else
-#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
-#endif
-#endif
-
-#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
-#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
-#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
-#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#else
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
-#endif
-#endif
-
-#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
-
-#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
-// We store bools in 1-byte storage in all backends
-#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
-#endif
-
-#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
-#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_BOOL_LOCK_FREE
-#endif
-
-#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-
-#if !defined(BOOST_ATOMIC_FLOAT_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
-#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
-#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
-#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
-#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 16
-#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
-#else
-#define BOOST_ATOMIC_FLOAT_LOCK_FREE 0
-#endif
-#endif
-
-#if !defined(BOOST_ATOMIC_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
-#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
-#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
-#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
-#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 16
-#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
-#else
-#define BOOST_ATOMIC_DOUBLE_LOCK_FREE 0
-#endif
-#endif
-
-#if !defined(BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
-#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
-#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
-#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
-#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 16
-#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
-#else
-#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE 0
-#endif
-#endif
-
-#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-
-#ifndef BOOST_ATOMIC_THREAD_FENCE
-#define BOOST_ATOMIC_THREAD_FENCE 0
-#endif
-
-#ifndef BOOST_ATOMIC_SIGNAL_FENCE
-#define BOOST_ATOMIC_SIGNAL_FENCE 0
-#endif
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/wait_capabilities.hpp>
#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
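
Usage sketch (illustrative, not part of the commit): the capability macros, now assembled in detail/capabilities.hpp and detail/wait_capabilities.hpp, keep their public names and the 0/1/2 scheme of C++11's ATOMIC_..._LOCK_FREE macros.

#include <iostream>
#include <boost/atomic/capabilities.hpp>

int main()
{
    // 0 = never lock-free, 1 = sometimes lock-free, 2 = always lock-free
    std::cout << "int32 lock-free level:   " << BOOST_ATOMIC_INT32_LOCK_FREE << '\n';
    std::cout << "pointer lock-free level: " << BOOST_ATOMIC_POINTER_LOCK_FREE << '\n';
    return 0;
}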
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/addressof.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/addressof.hpp
index 38e876e317..0bb551bc40 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/addressof.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/addressof.hpp
@@ -16,6 +16,7 @@
#define BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -37,7 +38,11 @@ namespace atomics {
namespace detail {
template< typename T >
-BOOST_FORCEINLINE T* addressof(T& value) BOOST_NOEXCEPT
+BOOST_FORCEINLINE
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
+BOOST_CONSTEXPR
+#endif
+T* addressof(T& value) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
return __builtin_addressof(value);
@@ -55,4 +60,6 @@ BOOST_FORCEINLINE T* addressof(T& value) BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
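
Standalone sketch (illustrative, not part of the commit) of the pattern above: addressof gains constexpr only when a compiler builtin is available, otherwise it falls back to the classic triple cast, which cannot be constexpr. The __has_builtin probe and the SKETCH_ macro are assumptions for illustration; the library uses its own BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF detection.

#if defined(__has_builtin)
#if __has_builtin(__builtin_addressof)
#define SKETCH_HAS_BUILTIN_ADDRESSOF
#endif
#endif

template< typename T >
#if defined(SKETCH_HAS_BUILTIN_ADDRESSOF)
constexpr
#endif
T* addressof_sketch(T& value) noexcept
{
#if defined(SKETCH_HAS_BUILTIN_ADDRESSOF)
    return __builtin_addressof(value);
#else
    // works even if T overloads unary operator&
    return reinterpret_cast< T* >(&const_cast< char& >(reinterpret_cast< const volatile char& >(value)));
#endif
}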
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/aligned_variable.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/aligned_variable.hpp
new file mode 100644
index 0000000000..feae43c0bb
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/aligned_variable.hpp
@@ -0,0 +1,57 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/aligned_variable.hpp
+ *
+ * This header defines a convenience macro for declaring aligned variables
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+#include <boost/config/helper_macros.hpp>
+#include <boost/type_traits/type_with_alignment.hpp>
+#endif
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+
+#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
+ alignas(var_alignment) var_type var_name
+
+#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
+ alignas(var_alignment) var_type var_name
+
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+
+// Note: Some compilers cannot use constant expressions in alignment attributes or alignas, so we have to use the union trick
+#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
+ union \
+ { \
+ var_type var_name; \
+ boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
+ }
+
+#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
+ union \
+ { \
+ var_type var_name; \
+ typename boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
+ }
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
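
Standalone sketch (illustrative, not part of the commit) of the union trick documented above: a union member of an over-aligned type forces the whole union, and thus the variable inside it, to that alignment without alignas. The 16-byte figure is an arbitrary assumption.

#include <boost/type_traits/type_with_alignment.hpp>

union aligned_slot
{
    unsigned char bytes[4];                          // the actual variable
    boost::type_with_alignment< 16 >::type aligner;  // forces 16-byte alignment
};

static aligned_slot slot = {};  // slot.bytes now sits at a 16-byte boundary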
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag.hpp
deleted file mode 100644
index 6f5fc8acc3..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/atomic_flag.hpp
- *
- * This header contains interface definition of \c atomic_flag.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
-
-#include <boost/assert.hpp>
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/operations_lockfree.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-/*
- * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
- * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
- */
-
-namespace boost {
-namespace atomics {
-
-#if defined(BOOST_NO_CXX11_CONSTEXPR) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)
-#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
-#else
-#define BOOST_ATOMIC_FLAG_INIT {}
-#endif
-
-struct atomic_flag
-{
- typedef atomics::detail::operations< 1u, false > operations;
- typedef operations::storage_type storage_type;
-
- operations::aligned_storage_type m_storage;
-
- BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0)
- {
- }
-
- BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
- {
- return operations::test_and_set(m_storage.value, order);
- }
-
- BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
- {
- BOOST_ASSERT(order != memory_order_consume);
- BOOST_ASSERT(order != memory_order_acquire);
- BOOST_ASSERT(order != memory_order_acq_rel);
- operations::clear(m_storage.value, order);
- }
-
- BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
- BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&))
-};
-
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag_impl.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag_impl.hpp
new file mode 100644
index 0000000000..a79d4adf0d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_flag_impl.hpp
@@ -0,0 +1,129 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/atomic_flag_impl.hpp
+ *
+ * This header contains implementation of \c atomic_flag.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
+
+#include <boost/assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/core_operations.hpp>
+#include <boost/atomic/detail/wait_operations.hpp>
+#include <boost/atomic/detail/aligned_variable.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
+ * see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
+ */
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)
+#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
+#else
+#define BOOST_ATOMIC_FLAG_INIT {}
+#endif
+
+//! Atomic flag implementation
+template< bool IsInterprocess >
+struct atomic_flag_impl
+{
+ // Prefer 4-byte storage as most platforms support waiting/notifying operations without a lock pool for 32-bit integers
+ typedef atomics::detail::core_operations< 4u, false, IsInterprocess > core_operations;
+ typedef atomics::detail::wait_operations< core_operations > wait_operations;
+ typedef typename core_operations::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = core_operations::is_always_lock_free;
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = wait_operations::always_has_native_wait_notify;
+
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(core_operations::storage_alignment, storage_type, m_storage);
+
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT atomic_flag_impl() BOOST_NOEXCEPT : m_storage(0u)
+ {
+ }
+
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ return is_always_lock_free;
+ }
+
+ BOOST_FORCEINLINE bool has_native_wait_notify() const volatile BOOST_NOEXCEPT
+ {
+ return wait_operations::has_native_wait_notify(m_storage);
+ }
+
+ BOOST_FORCEINLINE bool test(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+ return !!core_operations::load(m_storage, order);
+ }
+
+ BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return core_operations::test_and_set(m_storage, order);
+ }
+
+ BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+ core_operations::clear(m_storage, order);
+ }
+
+ BOOST_FORCEINLINE bool wait(bool old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return !!wait_operations::wait(m_storage, static_cast< storage_type >(old_val), order);
+ }
+
+ BOOST_FORCEINLINE void notify_one() volatile BOOST_NOEXCEPT
+ {
+ wait_operations::notify_one(m_storage);
+ }
+
+ BOOST_FORCEINLINE void notify_all() volatile BOOST_NOEXCEPT
+ {
+ wait_operations::notify_all(m_storage);
+ }
+
+ BOOST_DELETED_FUNCTION(atomic_flag_impl(atomic_flag_impl const&))
+ BOOST_DELETED_FUNCTION(atomic_flag_impl& operator= (atomic_flag_impl const&))
+};
+
+#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
+template< bool IsInterprocess >
+BOOST_CONSTEXPR_OR_CONST bool atomic_flag_impl< IsInterprocess >::is_always_lock_free;
+template< bool IsInterprocess >
+BOOST_CONSTEXPR_OR_CONST bool atomic_flag_impl< IsInterprocess >::always_has_native_wait_notify;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
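
Usage sketch (illustrative, not part of the commit): the implementation above adds C++20-style test(), wait() and notify operations to atomic_flag, and the 4-byte storage it prefers lets most platforms service the wait without the lock pool.

#include <boost/atomic/atomic_flag.hpp>

boost::atomic_flag ready = BOOST_ATOMIC_FLAG_INIT;

void consumer()
{
    // wait() returns once the flag no longer holds the old value
    while (!ready.test(boost::memory_order_acquire))
        ready.wait(false, boost::memory_order_acquire);
}

void producer()
{
    ready.test_and_set(boost::memory_order_release);
    ready.notify_one();
}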
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_template.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_impl.hpp
index fb0a8f58f0..0fd601bd80 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_template.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_impl.hpp
@@ -5,202 +5,213 @@
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014-2020 Andrey Semashev
*/
/*!
- * \file atomic/detail/atomic_template.hpp
+ * \file atomic/detail/atomic_impl.hpp
*
- * This header contains interface definition of \c atomic template.
+ * This header contains implementation of \c atomic template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_IMPL_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ATOMIC_IMPL_HPP_INCLUDED_
#include <cstddef>
-#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
+#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
-#include <boost/atomic/detail/integral_extend.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/integral_conversions.hpp>
+#include <boost/atomic/detail/core_operations.hpp>
+#include <boost/atomic/detail/wait_operations.hpp>
+#include <boost/atomic/detail/extra_operations.hpp>
+#include <boost/atomic/detail/memory_order_utils.hpp>
+#include <boost/atomic/detail/aligned_variable.hpp>
#include <boost/atomic/detail/type_traits/is_signed.hpp>
-#include <boost/atomic/detail/type_traits/is_integral.hpp>
-#include <boost/atomic/detail/type_traits/is_function.hpp>
-#include <boost/atomic/detail/type_traits/is_floating_point.hpp>
+#include <boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp>
#include <boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp>
+#include <boost/atomic/detail/type_traits/alignment_of.hpp>
#include <boost/atomic/detail/type_traits/conditional.hpp>
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
-#include <boost/atomic/detail/fp_operations_fwd.hpp>
-#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
+#include <boost/atomic/detail/fp_operations.hpp>
+#include <boost/atomic/detail/extra_fp_operations.hpp>
#endif
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// 'boost::atomics::atomic<T>' : multiple assignment operators specified
-#pragma warning(disable: 4522)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT) && !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST)
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR BOOST_CONSTEXPR
+#else
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR
#endif
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
- * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ * see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
*/
namespace boost {
namespace atomics {
namespace detail {
-BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT
-{
- return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order);
-}
-
-BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+template< typename T, bool Signed, bool Interprocess >
+class base_atomic_common
{
- // 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
- // Given the enum values we can test the strength of memory order requirements with this single condition.
- return (static_cast< unsigned int >(failure_order) & 15u) <= (static_cast< unsigned int >(success_order) & 15u);
-}
+public:
+ typedef T value_type;
-template< typename T, bool IsFunction = atomics::detail::is_function< T >::value >
-struct classify_pointer
-{
- typedef void* type;
-};
+protected:
+ typedef atomics::detail::core_operations< storage_size_of< value_type >::value, Signed, Interprocess > core_operations;
+ typedef atomics::detail::wait_operations< core_operations > wait_operations;
+ typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
+ typedef typename core_operations::storage_type storage_type;
-template< typename T >
-struct classify_pointer< T, true >
-{
- typedef void type;
-};
+protected:
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment ? core_operations::storage_alignment : atomics::detail::alignment_of< value_type >::value;
-template< typename T, bool IsInt = atomics::detail::is_integral< T >::value, bool IsFloat = atomics::detail::is_floating_point< T >::value >
-struct classify
-{
- typedef void type;
-};
+public:
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = core_operations::is_always_lock_free;
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = wait_operations::always_has_native_wait_notify;
-template< typename T >
-struct classify< T, true, false > { typedef int type; };
+protected:
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(storage_alignment, storage_type, m_storage);
-#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-template< typename T >
-struct classify< T, false, true > { typedef float type; };
-#endif
+public:
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT base_atomic_common() BOOST_NOEXCEPT : m_storage()
+ {
+ }
-template< typename T >
-struct classify< T*, false, false > { typedef typename classify_pointer< T >::type type; };
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT explicit base_atomic_common(storage_type v) BOOST_NOEXCEPT : m_storage(v)
+ {
+ }
-template< >
-struct classify< void*, false, false > { typedef void type; };
+ BOOST_FORCEINLINE value_type& value() BOOST_NOEXCEPT { return *reinterpret_cast< value_type* >(&m_storage); }
+ BOOST_FORCEINLINE value_type volatile& value() volatile BOOST_NOEXCEPT { return *reinterpret_cast< volatile value_type* >(&m_storage); }
+ BOOST_FORCEINLINE value_type const& value() const BOOST_NOEXCEPT { return *reinterpret_cast< const value_type* >(&m_storage); }
+ BOOST_FORCEINLINE value_type const volatile& value() const volatile BOOST_NOEXCEPT { return *reinterpret_cast< const volatile value_type* >(&m_storage); }
-template< >
-struct classify< const void*, false, false > { typedef void type; };
+protected:
+ BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return m_storage; }
+ BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return m_storage; }
+ BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return m_storage; }
+ BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return m_storage; }
-template< >
-struct classify< volatile void*, false, false > { typedef void type; };
+public:
+ BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
+ {
+ // C++17 requires all instances of atomic<> return a value consistent with is_always_lock_free here.
+ // Boost.Atomic also enforces the required alignment of the atomic storage, so we can always return is_always_lock_free.
+ return is_always_lock_free;
+ }
-template< >
-struct classify< const volatile void*, false, false > { typedef void type; };
+ BOOST_FORCEINLINE bool has_native_wait_notify() const volatile BOOST_NOEXCEPT
+ {
+ return wait_operations::has_native_wait_notify(this->storage());
+ }
-template< typename T, typename U >
-struct classify< T U::*, false, false > { typedef void type; };
+ BOOST_FORCEINLINE void notify_one() volatile BOOST_NOEXCEPT
+ {
+ wait_operations::notify_one(this->storage());
+ }
+ BOOST_FORCEINLINE void notify_all() volatile BOOST_NOEXCEPT
+ {
+ wait_operations::notify_all(this->storage());
+ }
+};
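
Note that is_lock_free() deliberately ignores the particular object and returns the type-wide constant, per the C++17 consistency requirement quoted in the comment. A minimal sanity check, using only the public API:

    #include <boost/atomic.hpp>
    #include <cassert>

    int main()
    {
        boost::atomic<int> a(0);
        // The per-object query may never contradict the type-wide constant,
        // which returning is_always_lock_free guarantees by construction.
        if (boost::atomic<int>::is_always_lock_free)
            assert(a.is_lock_free());
        return 0;
    }
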
-#if defined(BOOST_INTEL) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40700) ||\
- (defined(BOOST_CLANG) && !defined(__apple_build_version__) && ((__clang_major__+0) * 100 + (__clang_minor__+0)) < 302) ||\
- (defined(__clang__) && defined(__apple_build_version__) && ((__clang_major__+0) * 100 + (__clang_minor__+0)) < 402)
-// Intel compiler (at least 18.0 update 1) breaks if noexcept specification is used in defaulted function declarations:
-// error: the default constructor of "boost::atomics::atomic<T>" cannot be referenced -- it is a deleted function
-// GCC 4.6 doesn't seem to support that either. Clang 3.1 deduces wrong noexcept for the defaulted function and fails as well.
-#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
-#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL BOOST_NOEXCEPT
-#else
-#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL BOOST_NOEXCEPT
-#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
+#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
+template< typename T, bool Signed, bool Interprocess >
+BOOST_CONSTEXPR_OR_CONST bool base_atomic_common< T, Signed, Interprocess >::is_always_lock_free;
+template< typename T, bool Signed, bool Interprocess >
+BOOST_CONSTEXPR_OR_CONST bool base_atomic_common< T, Signed, Interprocess >::always_has_native_wait_notify;
#endif
-template< typename T, bool IsTriviallyDefaultConstructible = atomics::detail::is_trivially_default_constructible< T >::value >
+
+template< typename T, bool Interprocess, bool IsTriviallyDefaultConstructible = atomics::detail::is_trivially_default_constructible< T >::value >
class base_atomic_generic;
-template< typename T >
-class base_atomic_generic< T, true >
+template< typename T, bool Interprocess >
+class base_atomic_generic< T, Interprocess, true > :
+ public base_atomic_common< T, false, Interprocess >
{
-public:
- typedef T value_type;
-
-protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
- typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
-
-public:
- typedef typename operations::storage_type storage_type;
+private:
+ typedef base_atomic_common< T, false, Interprocess > base_type;
protected:
- typename operations::aligned_storage_type m_storage;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::value_arg_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic_generic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE explicit base_atomic_generic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR explicit base_atomic_generic(value_arg_type v) BOOST_NOEXCEPT :
+ base_type(atomics::detail::bitwise_cast< storage_type >(v))
{
}
};
-template< typename T >
-class base_atomic_generic< T, false >
+template< typename T, bool Interprocess >
+class base_atomic_generic< T, Interprocess, false > :
+ public base_atomic_common< T, false, Interprocess >
{
-public:
- typedef T value_type;
-
-protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
- typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
+private:
+ typedef base_atomic_common< T, false, Interprocess > base_type;
public:
- typedef typename operations::storage_type storage_type;
+ typedef typename base_type::value_type value_type;
protected:
- typename operations::aligned_storage_type m_storage;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::value_arg_type value_arg_type;
public:
- BOOST_FORCEINLINE explicit base_atomic_generic(value_arg_type v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR explicit base_atomic_generic(value_arg_type v = value_type()) BOOST_NOEXCEPT :
+ base_type(atomics::detail::bitwise_cast< storage_type >(v))
{
}
};
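
The BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR gate defined earlier decides whether these constructors are constexpr. When it expands to BOOST_CONSTEXPR, a namespace-scope atomic is constant-initialized, which sidesteps static initialization order hazards. A sketch of the payoff, assuming a compiler where the gate is active:

    #include <boost/atomic.hpp>

    // Constant-initialized when the constructor is constexpr, so other
    // static initializers may safely touch it regardless of link order.
    static boost::atomic<int> g_counter(0);

    int main()
    {
        g_counter.fetch_add(1, boost::memory_order_relaxed);
        return g_counter.load(boost::memory_order_relaxed) == 1 ? 0 : 1;
    }
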
-template< typename T, typename Kind >
+template< typename T, typename Kind, bool Interprocess >
class base_atomic;
-//! General template. Implementation for user-defined types, such as structs and enums, and pointers to non-object types
-template< typename T >
-class base_atomic< T, void > :
- public base_atomic_generic< T >
+//! General template. Implementation for user-defined types, such as structs, and pointers to non-object types
+template< typename T, bool Interprocess >
+class base_atomic< T, void, Interprocess > :
+ public base_atomic_generic< T, Interprocess >
{
private:
- typedef base_atomic_generic< T > base_type;
+ typedef base_atomic_generic< T, Interprocess > base_type;
public:
typedef typename base_type::value_type value_type;
- typedef typename base_type::storage_type storage_type;
protected:
- typedef typename base_type::operations operations;
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
typedef typename base_type::value_arg_type value_arg_type;
private:
- typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
public:
- BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v)
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR base_atomic() BOOST_NOEXCEPT_IF(atomics::detail::is_nothrow_default_constructible< value_type >::value) : base_type()
+ {
+ }
+
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v)
{
}
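
This void-kind specialization is the path taken by trivially copyable user-defined types: every operation round-trips through storage_type via bitwise_cast, and compare-exchange compares object representations bytewise. A sketch with a hypothetical padding-free struct (Point is illustrative only):

    #include <boost/atomic.hpp>
    #include <cassert>

    struct Point { short x; short y; }; // trivially copyable, no padding

    int main()
    {
        boost::atomic<Point> p(Point{1, 2});
        Point expected = p.load(boost::memory_order_relaxed);
        Point desired = {3, 4};
        // Comparison is bytewise on the object representation, hence the
        // bitwise_cast round-trip in the *_impl helpers below.
        bool ok = p.compare_exchange_strong(expected, desired);
        assert(ok && p.load().x == 3);
    }
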
@@ -210,7 +221,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(this->m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
+ core_operations::store(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -218,12 +229,12 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::bitwise_cast< value_type >(operations::load(this->m_storage.value, order));
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
}
BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(operations::exchange(this->m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -232,7 +243,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -246,7 +257,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -254,40 +265,160 @@ public:
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_cast< storage_type >(old_val), order));
+ }
+
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(this->m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(this->m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ }
+
BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(this->m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+};
+
+
+//! Implementation for enums
+template< typename T, bool Interprocess >
+class base_atomic< T, const int, Interprocess > :
+ public base_atomic_common< T, false, Interprocess >
+{
+private:
+ typedef base_atomic_common< T, false, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(static_cast< storage_type >(v))
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
- storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(this->m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
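
Enums previously fell into the generic void-kind path; this new const-int-kind specialization widens them with static_cast on the way in and recovers the enum with bitwise_cast on the way out. A usage sketch with a hypothetical enum:

    #include <boost/atomic.hpp>
    #include <cassert>

    enum State { Idle, Running, Stopped };

    int main()
    {
        boost::atomic<State> s(Idle);
        State expected = Idle;
        // Widened via static_cast< storage_type > for the CAS; on failure
        // the observed storage value is converted back for the caller.
        bool ok = s.compare_exchange_strong(expected, Running);
        assert(ok && s.load() == Running);
    }
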
@@ -295,30 +426,34 @@ private:
//! Implementation for integers
-template< typename T >
-class base_atomic< T, int >
+template< typename T, bool Interprocess >
+class base_atomic< T, int, Interprocess > :
+ public base_atomic_common< T, atomics::detail::is_signed< T >::value, Interprocess >
{
+private:
+ typedef base_atomic_common< T, atomics::detail::is_signed< T >::value, Interprocess > base_type;
+
public:
- typedef T value_type;
- typedef T difference_type;
+ typedef typename base_type::value_type value_type;
+ typedef value_type difference_type;
protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, atomics::detail::is_signed< T >::value > operations;
- typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef typename base_type::storage_type storage_type;
typedef value_type value_arg_type;
-public:
- typedef typename operations::storage_type storage_type;
-
private:
- typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
-
-protected:
- typename operations::aligned_storage_type m_storage;
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
public:
BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(static_cast< storage_type >(v)) {}
// Standard methods
BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -327,7 +462,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -335,22 +470,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::integral_truncate< value_type >(operations::load(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::load(this->storage(), order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::fetch_add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::fetch_add(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::fetch_sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::fetch_sub(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::exchange(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::exchange(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -359,7 +494,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -373,7 +508,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -383,156 +518,151 @@ public:
BOOST_FORCEINLINE value_type fetch_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::fetch_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::fetch_and(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::fetch_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::fetch_or(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(operations::fetch_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(core_operations::fetch_xor(this->storage(), static_cast< storage_type >(v), order));
}
// Boost.Atomic extensions
BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_negate(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_negate(this->storage(), order));
}
BOOST_FORCEINLINE value_type fetch_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_complement(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_complement(this->storage(), order));
}
BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::add(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::sub(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::negate(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::negate(this->storage(), order));
}
BOOST_FORCEINLINE value_type bitwise_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_and(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type bitwise_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_or(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type bitwise_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_xor(this->storage(), static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type bitwise_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_complement(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_complement(this->storage(), order));
}
BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ extra_operations::opaque_add(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ extra_operations::opaque_sub(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_negate(m_storage.value, order);
+ extra_operations::opaque_negate(this->storage(), order);
}
BOOST_FORCEINLINE void opaque_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ extra_operations::opaque_and(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ extra_operations::opaque_or(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ extra_operations::opaque_xor(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_complement(m_storage.value, order);
+ extra_operations::opaque_complement(this->storage(), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::add_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ return extra_operations::add_and_test(this->storage(), static_cast< storage_type >(v), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::sub_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ return extra_operations::sub_and_test(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE bool negate_and_test(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::negate_and_test(m_storage.value, order);
+ return extra_operations::negate_and_test(this->storage(), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool and_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::and_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ return extra_operations::and_and_test(this->storage(), static_cast< storage_type >(v), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool or_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::or_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ return extra_operations::or_and_test(this->storage(), static_cast< storage_type >(v), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool xor_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::xor_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ return extra_operations::xor_and_test(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE bool complement_and_test(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::complement_and_test(m_storage.value, order);
+ return extra_operations::complement_and_test(this->storage(), order);
}
BOOST_FORCEINLINE bool bit_test_and_set(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
- return extra_operations::bit_test_and_set(m_storage.value, bit_number, order);
+ return extra_operations::bit_test_and_set(this->storage(), bit_number, order);
}
BOOST_FORCEINLINE bool bit_test_and_reset(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
- return extra_operations::bit_test_and_reset(m_storage.value, bit_number, order);
+ return extra_operations::bit_test_and_reset(this->storage(), bit_number, order);
}
BOOST_FORCEINLINE bool bit_test_and_complement(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
- return extra_operations::bit_test_and_complement(m_storage.value, bit_number, order);
+ return extra_operations::bit_test_and_complement(this->storage(), bit_number, order);
}
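
These opaque_* and *_and_test members are Boost.Atomic extensions beyond the standard interface; discarding the result in the opaque_* forms lets backends select cheaper instructions than the fetch_* variants. A short sketch of two of them (values illustrative):

    #include <boost/atomic.hpp>
    #include <cassert>

    int main()
    {
        boost::atomic<unsigned int> flags(0u);
        // bit_test_and_set returns the previous state of bit 3.
        bool was_set = flags.bit_test_and_set(3u);
        assert(!was_set && flags.load() == 8u);
        // opaque_add performs the addition without returning the old value.
        flags.opaque_add(2u);
        assert(flags.load() == 10u);
    }
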
// Operators
@@ -581,68 +711,72 @@ public:
return bitwise_xor(v);
}
+ BOOST_FORCEINLINE value_type wait(value_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::integral_truncate< value_type >(wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order));
+ }
+
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
- storage_type old_value = atomics::detail::integral_extend< operations::is_signed, storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::integral_truncate< value_type >(old_value);
return res;
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
- storage_type old_value = atomics::detail::integral_extend< operations::is_signed, storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::integral_truncate< value_type >(old_value);
return res;
}
};
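
Because storage_type may be wider than the value type, integer results are narrowed with integral_truncate on every load-like path, which preserves ordinary modular semantics for small unsigned types. A sketch:

    #include <boost/atomic.hpp>
    #include <cassert>

    int main()
    {
        boost::atomic<unsigned char> c(255);
        // Even if the backend keeps this in a wider word, truncation on
        // read yields the expected wrap-around to zero.
        c.fetch_add(1);
        assert(c.load() == 0);
    }
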
//! Implementation for bool
-template< >
-class base_atomic< bool, int >
+template< bool Interprocess >
+class base_atomic< bool, int, Interprocess > :
+ public base_atomic_common< bool, false, Interprocess >
{
+private:
+ typedef base_atomic_common< bool, false, Interprocess > base_type;
+
public:
- typedef bool value_type;
+ typedef typename base_type::value_type value_type;
protected:
- typedef atomics::detail::operations< 1u, false > operations;
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
typedef value_type value_arg_type;
-public:
- typedef operations::storage_type storage_type;
-
private:
- typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
-
-protected:
- operations::aligned_storage_type m_storage;
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
public:
BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(static_cast< storage_type >(v)) {}
// Standard methods
BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -651,7 +785,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, static_cast< storage_type >(v), order);
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -659,12 +793,12 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return !!operations::load(m_storage.value, order);
+ return !!core_operations::load(this->storage(), order);
}
BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return !!operations::exchange(m_storage.value, static_cast< storage_type >(v), order);
+ return !!core_operations::exchange(this->storage(), static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -673,7 +807,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -687,7 +821,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -695,40 +829,40 @@ public:
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
+ BOOST_FORCEINLINE value_type wait(value_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return !!wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order);
+ }
+
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
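
All of these *_impl overloads serve the canonical retry loop: on failure, expected is refreshed with the observed value, so the loop never re-reads the atomic except through the CAS itself. A sketch of that loop over the public API:

    #include <boost/atomic.hpp>

    int main()
    {
        boost::atomic<int> counter(0);
        int expected = counter.load(boost::memory_order_relaxed);
        while (!counter.compare_exchange_weak(expected, expected + 1,
                                              boost::memory_order_acq_rel,
                                              boost::memory_order_relaxed))
        {
            // expected was updated to the observed value; just retry.
        }
        return 0;
    }
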
@@ -738,32 +872,41 @@ private:
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
//! Implementation for floating point types
-template< typename T >
-class base_atomic< T, float >
+template< typename T, bool Interprocess >
+class base_atomic< T, float, Interprocess > :
+ public base_atomic_common< T, false, Interprocess >
{
+private:
+ typedef base_atomic_common< T, false, Interprocess > base_type;
+
public:
- typedef T value_type;
- typedef T difference_type;
+ typedef typename base_type::value_type value_type;
+ typedef value_type difference_type;
protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
- typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
- typedef atomics::detail::fp_operations< extra_operations, value_type, operations::storage_size > fp_operations;
- typedef atomics::detail::extra_fp_operations< fp_operations, value_type, operations::storage_size > extra_fp_operations;
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef atomics::detail::fp_operations< extra_operations, value_type > fp_operations;
+ typedef atomics::detail::extra_fp_operations< fp_operations > extra_fp_operations;
+ typedef typename base_type::storage_type storage_type;
typedef value_type value_arg_type;
-public:
- typedef typename operations::storage_type storage_type;
-
private:
- typedef atomics::detail::integral_constant< bool, atomics::detail::value_sizeof< value_type >::value == sizeof(storage_type) > value_matches_storage;
-
-protected:
- typename operations::aligned_storage_type m_storage;
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool,
+ atomics::detail::value_size_of< value_type >::value != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment
+ > cxchg_use_bitwise_cast;
+#endif
public:
BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_fp_cast< storage_type >(v)) {}
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT :
+ base_type(atomics::detail::bitwise_fp_cast< storage_type >(v))
+ {
+ }
BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
@@ -771,7 +914,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, atomics::detail::bitwise_fp_cast< storage_type >(v), order);
+ core_operations::store(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -779,22 +922,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::bitwise_fp_cast< value_type >(operations::load(m_storage.value, order));
+ return atomics::detail::bitwise_fp_cast< value_type >(core_operations::load(this->storage(), order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return fp_operations::fetch_add(m_storage.value, v, order);
+ return fp_operations::fetch_add(this->storage(), v, order);
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return fp_operations::fetch_sub(m_storage.value, v, order);
+ return fp_operations::fetch_sub(this->storage(), v, order);
}
BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_fp_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_fp_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_fp_cast< value_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -803,7 +946,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -817,7 +960,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -828,37 +971,37 @@ public:
// Boost.Atomic extensions
BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_fp_operations::fetch_negate(m_storage.value, order);
+ return extra_fp_operations::fetch_negate(this->storage(), order);
}
BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_fp_operations::add(m_storage.value, v, order);
+ return extra_fp_operations::add(this->storage(), v, order);
}
BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_fp_operations::sub(m_storage.value, v, order);
+ return extra_fp_operations::sub(this->storage(), v, order);
}
BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_fp_operations::negate(m_storage.value, order);
+ return extra_fp_operations::negate(this->storage(), order);
}
BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_fp_operations::opaque_add(m_storage.value, v, order);
+ extra_fp_operations::opaque_add(this->storage(), v, order);
}
BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_fp_operations::opaque_sub(m_storage.value, v, order);
+ extra_fp_operations::opaque_sub(this->storage(), v, order);
}
BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_fp_operations::opaque_negate(m_storage.value, order);
+ extra_fp_operations::opaque_negate(this->storage(), order);
}
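
Among these extensions, the fetch_* forms return the previous value, the plain forms (add, sub, negate) return the result, and the opaque_* forms return nothing. A small sketch of the distinction:

    boost::atomic< float > x(2.0f);
    float before = x.fetch_negate(); // before == 2.0f, x now holds -2.0f
    float after  = x.negate();       // x now holds 2.0f again, after == 2.0f
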
// Operators
@@ -872,40 +1015,40 @@ public:
return sub(v);
}
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_fp_cast< value_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(old_val), order));
+ }
+
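
A hypothetical usage sketch of the wait()/notify extension added above (assumes <boost/atomic/atomic.hpp>; note that wait() compares floating point values bitwise):

    #include <boost/atomic/atomic.hpp>

    boost::atomic< float > temperature(20.0f);

    void consumer()
    {
        // Blocks until the stored value differs (bitwise) from the argument,
        // then returns the freshly loaded value.
        float updated = temperature.wait(20.0f);
        (void)updated;
    }

    void producer()
    {
        temperature.store(21.5f);
        temperature.notify_one(); // wakes one thread blocked in wait()
    }
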
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
return res;
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
return res;
}
@@ -915,38 +1058,39 @@ private:
//! Implementation for pointers to object types
-template< typename T >
-class base_atomic< T*, void* >
+template< typename T, bool Interprocess >
+class base_atomic< T*, void*, Interprocess > :
+ public base_atomic_common< T*, false, Interprocess >
{
+private:
+ typedef base_atomic_common< T*, false, Interprocess > base_type;
+
public:
- typedef T* value_type;
+ typedef typename base_type::value_type value_type;
typedef std::ptrdiff_t difference_type;
protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
- typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef typename base_type::storage_type storage_type;
typedef value_type value_arg_type;
-public:
- typedef typename operations::storage_type storage_type;
-
private:
- typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
-
- // uintptr_storage_type is the minimal storage type that is enough to store pointers. The actual storage_type theoretically may be larger,
- // if the target architecture only supports atomic ops on larger data. Typically, though, they are the same type.
-#if defined(BOOST_HAS_INTPTR_T)
- typedef uintptr_t uintptr_storage_type;
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
#else
- typedef typename atomics::detail::make_storage_type< sizeof(value_type) >::type uintptr_storage_type;
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) != sizeof(storage_type) || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
#endif
-protected:
- typename operations::aligned_storage_type m_storage;
+ // uintptr_storage_type is the minimal storage type that is enough to store pointers. The actual storage_type theoretically may be larger,
+ // if the target architecture only supports atomic ops on larger data. Typically, though, they are the same type.
+ typedef atomics::detail::uintptr_t uintptr_storage_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< uintptr_storage_type >(v))
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_ATOMIC_CTOR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT :
+ base_type(atomics::detail::bitwise_cast< uintptr_storage_type >(v))
{
}
@@ -957,7 +1101,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, atomics::detail::bitwise_cast< uintptr_storage_type >(v), order);
+ core_operations::store(this->storage(), atomics::detail::bitwise_cast< uintptr_storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -965,22 +1109,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::load(m_storage.value, order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(core_operations::load(this->storage(), order)));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::fetch_add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(core_operations::fetch_add(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::fetch_sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(core_operations::fetch_sub(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
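
fetch_add and fetch_sub above scale the argument by sizeof(T), so the atomic pointer behaves like ordinary pointer arithmetic performed on the integral representation. A usage sketch (assuming a typical target where sizeof(int) == 4):

    #include <boost/atomic/atomic.hpp>

    int buf[4] = {};
    boost::atomic< int* > p(&buf[0]);

    void advance()
    {
        int* old = p.fetch_add(2);  // adds 2 * sizeof(int) bytes to the representation
        // old == &buf[0]; p.load() now points at &buf[2]
        (void)old;
    }
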
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< uintptr_storage_type >(v), order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_cast< uintptr_storage_type >(v), order)));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -989,7 +1133,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -1003,7 +1147,7 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -1014,34 +1158,32 @@ public:
// Boost.Atomic extensions
BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::add(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::sub(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
+ extra_operations::opaque_add(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
+ extra_operations::opaque_sub(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::add_and_test(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
+ return extra_operations::add_and_test(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
- BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::sub_and_test(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
+ return extra_operations::sub_and_test(this->storage(), static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
// Operators
@@ -1075,174 +1217,49 @@ public:
return sub(v);
}
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_cast< uintptr_storage_type >(old_val), order)));
+ }
+
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_cast< uintptr_storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(old_value));
return res;
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
-#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
-#endif
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_cast< uintptr_storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(old_value));
return res;
}
};
} // namespace detail
-
-template< typename T >
-class atomic :
- public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type >
-{
-private:
- typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > base_type;
- typedef typename base_type::value_arg_type value_arg_type;
-
-public:
- typedef typename base_type::value_type value_type;
- typedef typename base_type::storage_type storage_type;
-
-public:
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = base_type::operations::is_always_lock_free;
-
-public:
- BOOST_DEFAULTED_FUNCTION(atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}
-
- BOOST_FORCEINLINE value_type operator= (value_arg_type v) BOOST_NOEXCEPT
- {
- this->store(v);
- return v;
- }
-
- BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
- {
- this->store(v);
- return v;
- }
-
- BOOST_FORCEINLINE operator value_type() const volatile BOOST_NOEXCEPT
- {
- return this->load();
- }
-
- BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
- {
- // C++17 requires all instances of atomic<> return a value consistent with is_always_lock_free here
- return is_always_lock_free;
- }
-
- BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage.value; }
- BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage.value; }
- BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage.value; }
- BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage.value; }
-
- BOOST_DELETED_FUNCTION(atomic(atomic const&))
- BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))
- BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)
-};
-
-template< typename T >
-BOOST_CONSTEXPR_OR_CONST bool atomic< T >::is_always_lock_free;
-
-#undef BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
-#undef BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
-
-typedef atomic< char > atomic_char;
-typedef atomic< unsigned char > atomic_uchar;
-typedef atomic< signed char > atomic_schar;
-typedef atomic< uint8_t > atomic_uint8_t;
-typedef atomic< int8_t > atomic_int8_t;
-typedef atomic< unsigned short > atomic_ushort;
-typedef atomic< short > atomic_short;
-typedef atomic< uint16_t > atomic_uint16_t;
-typedef atomic< int16_t > atomic_int16_t;
-typedef atomic< unsigned int > atomic_uint;
-typedef atomic< int > atomic_int;
-typedef atomic< uint32_t > atomic_uint32_t;
-typedef atomic< int32_t > atomic_int32_t;
-typedef atomic< unsigned long > atomic_ulong;
-typedef atomic< long > atomic_long;
-typedef atomic< uint64_t > atomic_uint64_t;
-typedef atomic< int64_t > atomic_int64_t;
-#ifdef BOOST_HAS_LONG_LONG
-typedef atomic< boost::ulong_long_type > atomic_ullong;
-typedef atomic< boost::long_long_type > atomic_llong;
-#endif
-typedef atomic< void* > atomic_address;
-typedef atomic< bool > atomic_bool;
-typedef atomic< wchar_t > atomic_wchar_t;
-#if !defined(BOOST_NO_CXX11_CHAR16_T)
-typedef atomic< char16_t > atomic_char16_t;
-#endif
-#if !defined(BOOST_NO_CXX11_CHAR32_T)
-typedef atomic< char32_t > atomic_char32_t;
-#endif
-
-typedef atomic< int_least8_t > atomic_int_least8_t;
-typedef atomic< uint_least8_t > atomic_uint_least8_t;
-typedef atomic< int_least16_t > atomic_int_least16_t;
-typedef atomic< uint_least16_t > atomic_uint_least16_t;
-typedef atomic< int_least32_t > atomic_int_least32_t;
-typedef atomic< uint_least32_t > atomic_uint_least32_t;
-typedef atomic< int_least64_t > atomic_int_least64_t;
-typedef atomic< uint_least64_t > atomic_uint_least64_t;
-typedef atomic< int_fast8_t > atomic_int_fast8_t;
-typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
-typedef atomic< int_fast16_t > atomic_int_fast16_t;
-typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
-typedef atomic< int_fast32_t > atomic_int_fast32_t;
-typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
-typedef atomic< int_fast64_t > atomic_int_fast64_t;
-typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
-typedef atomic< intmax_t > atomic_intmax_t;
-typedef atomic< uintmax_t > atomic_uintmax_t;
-
-#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
-typedef atomic< float > atomic_float_t;
-typedef atomic< double > atomic_double_t;
-typedef atomic< long double > atomic_long_double_t;
-#endif
-
-typedef atomic< std::size_t > atomic_size_t;
-typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
-
-#if defined(BOOST_HAS_INTPTR_T)
-typedef atomic< intptr_t > atomic_intptr_t;
-typedef atomic< uintptr_t > atomic_uintptr_t;
-#endif
-
} // namespace atomics
} // namespace boost
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
+#include <boost/atomic/detail/footer.hpp>
-#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_ATOMIC_IMPL_HPP_INCLUDED_
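
For reference, the atomic<> front-end and the atomic_* typedefs removed above were moved out of this header (into atomic.hpp and related headers); client code is unaffected. A minimal usage sketch:

    #include <boost/atomic/atomic.hpp>

    boost::atomic< int > counter(0);

    int bump()
    {
        return counter.fetch_add(1, boost::memory_order_relaxed);
    }
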
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_ref_impl.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_ref_impl.hpp
new file mode 100644
index 0000000000..59e53332e1
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/atomic_ref_impl.hpp
@@ -0,0 +1,1226 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/atomic_ref_impl.hpp
+ *
+ * This header contains implementation of \c atomic_ref template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_REF_IMPL_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ATOMIC_REF_IMPL_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/addressof.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/bitwise_cast.hpp>
+#include <boost/atomic/detail/core_operations.hpp>
+#include <boost/atomic/detail/wait_operations.hpp>
+#include <boost/atomic/detail/extra_operations.hpp>
+#include <boost/atomic/detail/core_operations_emulated.hpp>
+#include <boost/atomic/detail/memory_order_utils.hpp>
+#include <boost/atomic/detail/type_traits/is_signed.hpp>
+#include <boost/atomic/detail/type_traits/alignment_of.hpp>
+#include <boost/atomic/detail/type_traits/conditional.hpp>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/fp_operations.hpp>
+#include <boost/atomic/detail/extra_fp_operations.hpp>
+#endif
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
+ * see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
+ */
+
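
Background for that note, sketched against the GCC backend it refers to (an assumption about the toolchain, not Boost code): the __atomic builtins emit optimal instructions only when the memory order argument is a compile-time constant, which forced inlining preserves through these thin interface layers.

    // With GCC or Clang:
    int load_relaxed(const int* p)
    {
        return __atomic_load_n(p, __ATOMIC_RELAXED); // constant order, typically a plain load on x86
    }
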
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T, bool Signed, bool Interprocess >
+struct is_atomic_ref_lock_free
+{
+ typedef T value_type;
+ typedef atomics::detail::core_operations< sizeof(value_type), Signed, Interprocess > core_operations;
+ typedef typename core_operations::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool value = sizeof(value_type) == sizeof(storage_type) && core_operations::is_always_lock_free;
+};
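
The trait above gates the backend choice: lock-free core operations are used only when the storage type exactly matches the value size and the core backend is itself lock-free; otherwise the emulated (lock-based) core_operations is selected below. At the public level this surfaces as is_always_lock_free, e.g. (an expectation for mainstream targets, not a guarantee):

    #include <boost/atomic/atomic_ref.hpp>

    static_assert(boost::atomic_ref< int >::is_always_lock_free,
        "atomic_ref< int > is expected to be lock-free on this target");
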
+
+template< typename T, bool Signed, bool Interprocess >
+class base_atomic_ref_common
+{
+public:
+ typedef T value_type;
+
+protected:
+ typedef typename atomics::detail::conditional<
+ atomics::detail::is_atomic_ref_lock_free< T, Signed, Interprocess >::value,
+ atomics::detail::core_operations< sizeof(value_type), Signed, Interprocess >,
+ atomics::detail::core_operations_emulated< sizeof(value_type), atomics::detail::alignment_of< value_type >::value, Signed, Interprocess >
+ >::type core_operations;
+ typedef atomics::detail::wait_operations< core_operations > wait_operations;
+ typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
+ typedef typename core_operations::storage_type storage_type;
+ BOOST_STATIC_ASSERT_MSG(sizeof(storage_type) == sizeof(value_type), "Boost.Atomic internal error: atomic_ref storage size doesn't match the value size");
+
+public:
+ static BOOST_CONSTEXPR_OR_CONST std::size_t required_alignment = atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment ? core_operations::storage_alignment : atomics::detail::alignment_of< value_type >::value;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = core_operations::is_always_lock_free;
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = wait_operations::always_has_native_wait_notify;
+
+protected:
+ value_type* m_value;
+
+public:
+ BOOST_FORCEINLINE explicit base_atomic_ref_common(value_type& v) BOOST_NOEXCEPT : m_value(atomics::detail::addressof(v))
+ {
+ BOOST_ATOMIC_DETAIL_CLEAR_PADDING(this->m_value);
+ }
+
+ BOOST_FORCEINLINE value_type& value() const BOOST_NOEXCEPT { return *m_value; }
+
+protected:
+ BOOST_FORCEINLINE storage_type& storage() const BOOST_NOEXCEPT
+ {
+ return *reinterpret_cast< storage_type* >(m_value);
+ }
+
+public:
+ BOOST_FORCEINLINE bool is_lock_free() const BOOST_NOEXCEPT
+ {
+ // C++20 specifies that is_lock_free returns true if operations on *all* objects of the atomic_ref<T> type are lock-free.
+ // This does not allow the result to depend on the referenced object's runtime alignment. Currently, Boost.Atomic
+ // follows this specification, although we may support runtime alignment checking in the future.
+ return is_always_lock_free;
+ }
+
+ BOOST_FORCEINLINE bool has_native_wait_notify() const BOOST_NOEXCEPT
+ {
+ return wait_operations::has_native_wait_notify(this->storage());
+ }
+
+ BOOST_FORCEINLINE void notify_one() const BOOST_NOEXCEPT
+ {
+ wait_operations::notify_one(this->storage());
+ }
+
+ BOOST_FORCEINLINE void notify_all() const BOOST_NOEXCEPT
+ {
+ wait_operations::notify_all(this->storage());
+ }
+};
+
+#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
+template< typename T, bool Signed, bool Interprocess >
+BOOST_CONSTEXPR_OR_CONST std::size_t base_atomic_ref_common< T, Signed, Interprocess >::required_alignment;
+template< typename T, bool Signed, bool Interprocess >
+BOOST_CONSTEXPR_OR_CONST bool base_atomic_ref_common< T, Signed, Interprocess >::is_always_lock_free;
+template< typename T, bool Signed, bool Interprocess >
+BOOST_CONSTEXPR_OR_CONST bool base_atomic_ref_common< T, Signed, Interprocess >::always_has_native_wait_notify;
+#endif
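
A usage sketch for the required_alignment constant published above: the referenced object must be at least this aligned, which callers can enforce with alignas (the struct and names here are illustrative):

    #include <boost/atomic/atomic_ref.hpp>

    struct counters
    {
        alignas(boost::atomic_ref< long >::required_alignment) long hits = 0;
    };

    void record(counters& c)
    {
        boost::atomic_ref< long >(c.hits).fetch_add(1, boost::memory_order_relaxed);
    }
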
+
+
+template< typename T, typename Kind, bool Interprocess >
+class base_atomic_ref;
+
+//! General template. Implementation for user-defined types, such as structs, and pointers to non-object types
+template< typename T, bool Interprocess >
+class base_atomic_ref< T, void, Interprocess > :
+ public base_atomic_ref_common< T, false, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< T, false, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+};
+
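
A hedged usage sketch for the general template above, which serves trivially copyable user-defined types by funnelling all transfers through bitwise_cast (the struct and names are illustrative; compare_exchange compares object representations, so types with padding need care):

    #include <boost/atomic/atomic_ref.hpp>

    struct point { short x, y; }; // trivially copyable, 4 bytes, no padding on common ABIs

    // The referenced object must satisfy required_alignment (see above).
    alignas(boost::atomic_ref< point >::required_alignment) point g_pt = {0, 0};

    void move_right()
    {
        boost::atomic_ref< point > r(g_pt);
        point expected = r.load(boost::memory_order_relaxed);
        point desired;
        do { desired = expected; ++desired.x; }
        while (!r.compare_exchange_weak(expected, desired));
    }
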
+
+//! Implementation for enums
+template< typename T, bool Interprocess >
+class base_atomic_ref< T, const int, Interprocess > :
+ public base_atomic_ref_common< T, false, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< T, false, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+};
+
+
+//! Implementation for integers
+template< typename T, bool Interprocess >
+class base_atomic_ref< T, int, Interprocess > :
+ public base_atomic_ref_common< T, atomics::detail::is_signed< T >::value, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< T, atomics::detail::is_signed< T >::value, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::value_type difference_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef value_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ // Standard methods
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_add(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_sub(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_and(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_and(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_or(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_or(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_xor(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_xor(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::fetch_negate(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_complement(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::fetch_complement(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::add(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::sub(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::negate(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_and(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::bitwise_and(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_or(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::bitwise_or(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_xor(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::bitwise_xor(this->storage(), static_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_complement(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::bitwise_complement(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_add(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_sub(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_negate(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_and(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_and(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_or(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_or(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_xor(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_xor(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_complement(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_complement(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::add_and_test(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::sub_and_test(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool negate_and_test(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::negate_and_test(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE bool and_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::and_and_test(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool or_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::or_and_test(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool xor_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::xor_and_test(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool complement_and_test(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::complement_and_test(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_set(unsigned int bit_number, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_set(this->storage(), bit_number, order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_reset(unsigned int bit_number, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_reset(this->storage(), bit_number, order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_complement(unsigned int bit_number, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_complement(this->storage(), bit_number, order);
+ }
+
+ // Operators
+ BOOST_FORCEINLINE value_type operator++(int) const BOOST_NOEXCEPT
+ {
+ return fetch_add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator++() const BOOST_NOEXCEPT
+ {
+ return add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--(int) const BOOST_NOEXCEPT
+ {
+ return fetch_sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--() const BOOST_NOEXCEPT
+ {
+ return sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return add(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return sub(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator&=(value_type v) const BOOST_NOEXCEPT
+ {
+ return bitwise_and(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator|=(value_type v) const BOOST_NOEXCEPT
+ {
+ return bitwise_or(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator^=(value_type v) const BOOST_NOEXCEPT
+ {
+ return bitwise_xor(v);
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+};
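+
+/*!
+ * Usage sketch for the extension methods defined above (illustrative only, not part of
+ * the upstream header). It assumes the public boost::atomic_ref< T > wrapper from
+ * <boost/atomic/atomic_ref.hpp>, which exposes this implementation:
+ *
+ * \code
+ * unsigned int value = 0u;
+ * boost::atomic_ref< unsigned int > ref(value);
+ * ref.opaque_add(5u);                       // fire-and-forget add, result discarded
+ * bool old_bit = ref.bit_test_and_set(3u);  // sets bit 3, returns its previous state (false)
+ * bool nonzero = ref.sub_and_test(5u);      // true: the new value (8) is non-zero
+ * \endcode
+ */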
+
+//! Implementation for bool
+template< bool Interprocess >
+class base_atomic_ref< bool, int, Interprocess > :
+ public base_atomic_ref_common< bool, false, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< bool, false, Interprocess > base_type;
+
+public:
+ typedef bool value_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef value_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ // Standard methods
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return !!core_operations::load(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return !!core_operations::exchange(this->storage(), static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return !!wait_operations::wait(this->storage(), static_cast< storage_type >(old_val), order);
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = !!old_value;
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = !!old_value;
+ return res;
+ }
+};
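+
+/*!
+ * Usage sketch (illustrative only, not part of the upstream header): the bool
+ * specialization deliberately provides no arithmetic or bitwise operations, only
+ * load/store/exchange/compare-exchange/wait:
+ *
+ * \code
+ * bool flag = false;
+ * boost::atomic_ref< bool > ref(flag);
+ * bool expected = false;
+ * if (ref.compare_exchange_strong(expected, true, boost::memory_order_acq_rel))
+ * {
+ *     // we won the race; the failure order is deduced from acq_rel (acquire)
+ * }
+ * // on failure, `expected` holds the value that was observed
+ * \endcode
+ */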
+
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+//! Implementation for floating point types
+template< typename T, bool Interprocess >
+class base_atomic_ref< T, float, Interprocess > :
+ public base_atomic_ref_common< T, false, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< T, false, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::value_type difference_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef atomics::detail::fp_operations< extra_operations, value_type > fp_operations;
+ typedef atomics::detail::extra_fp_operations< fp_operations > extra_fp_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef value_type value_arg_type;
+
+private:
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::integral_constant< bool, atomics::detail::value_size_of< value_type >::value != sizeof(storage_type) > has_padding_bits;
+#endif
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, has_padding_bits::value || atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ // We only need to call clear_padding_bits here if the compiler does not provide
+ // BOOST_ATOMIC_DETAIL_CLEAR_PADDING; otherwise the base class constructor has already
+ // cleared the padding.
+#if defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ this->clear_padding_bits(has_padding_bits());
+#endif // defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ }
+
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_fp_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return fp_operations::fetch_add(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return fp_operations::fetch_sub(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_fp_cast< value_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::fetch_negate(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::add(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::sub(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::negate(this->storage(), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_add(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_sub(this->storage(), v, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_negate(this->storage(), order);
+ }
+
+ // Operators
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return add(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return sub(v);
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_fp_cast< value_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_fp_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+#if defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ BOOST_FORCEINLINE void clear_padding_bits(atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ }
+
+ BOOST_FORCEINLINE void clear_padding_bits(atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ atomics::detail::clear_tail_padding_bits< atomics::detail::value_size_of< value_type >::value >(this->storage());
+ }
+#endif // defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
+ return res;
+ }
+};
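+
+/*!
+ * Usage sketch (illustrative only, not part of the upstream header): floating point
+ * atomics support add/sub/negate extensions but no bitwise operations. The referenced
+ * object must satisfy boost::atomic_ref< double >::required_alignment:
+ *
+ * \code
+ * double sum = 0.0;
+ * boost::atomic_ref< double > ref(sum);
+ * double prev = ref.fetch_add(1.5);          // returns the previous value (0.0)
+ * ref.opaque_negate();                       // sum becomes -1.5, result discarded
+ * double now = ref.load(boost::memory_order_acquire);
+ * \endcode
+ */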
+
+#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+
+//! Implementation for pointers to object types
+template< typename T, bool Interprocess >
+class base_atomic_ref< T*, void*, Interprocess > :
+ public base_atomic_ref_common< T*, false, Interprocess >
+{
+private:
+ typedef base_atomic_ref_common< T*, false, Interprocess > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+ typedef std::ptrdiff_t difference_type;
+
+protected:
+ typedef typename base_type::core_operations core_operations;
+ typedef typename base_type::wait_operations wait_operations;
+ typedef atomics::detail::extra_operations< core_operations > extra_operations;
+ typedef typename base_type::storage_type storage_type;
+ typedef value_type value_arg_type;
+
+private:
+#if !defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS) || !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ typedef atomics::detail::true_type cxchg_use_bitwise_cast;
+#else
+ typedef atomics::detail::integral_constant< bool, atomics::detail::alignment_of< value_type >::value <= core_operations::storage_alignment > cxchg_use_bitwise_cast;
+#endif
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_ref(base_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit base_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ // Standard methods
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ core_operations::store(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(core_operations::load(this->storage(), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_add(this->storage(), static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::fetch_sub(this->storage(), static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(core_operations::exchange(this->storage(), atomics::detail::bitwise_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, cxchg_use_bitwise_cast());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::add(this->storage(), static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(extra_operations::sub(this->storage(), static_cast< storage_type >(v * sizeof(T)), order));
+ }
+
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_add(this->storage(), static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_sub(this->storage(), static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::add_and_test(this->storage(), static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ return extra_operations::sub_and_test(this->storage(), static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ // Operators
+ BOOST_FORCEINLINE value_type operator++(int) const BOOST_NOEXCEPT
+ {
+ return fetch_add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator++() const BOOST_NOEXCEPT
+ {
+ return add(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--(int) const BOOST_NOEXCEPT
+ {
+ return fetch_sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator--() const BOOST_NOEXCEPT
+ {
+ return sub(1);
+ }
+
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return add(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) const BOOST_NOEXCEPT
+ {
+ return sub(v);
+ }
+
+ BOOST_FORCEINLINE value_type wait(value_arg_type old_val, memory_order order = memory_order_seq_cst) const BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_cast< value_type >(wait_operations::wait(this->storage(), atomics::detail::bitwise_cast< storage_type >(old_val), order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic_ref& operator=(base_atomic_ref const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_strong(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_strong(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) const BOOST_NOEXCEPT
+ {
+ return core_operations::compare_exchange_weak(this->storage(), reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) const BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = core_operations::compare_exchange_weak(this->storage(), old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
+ return res;
+ }
+};
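+
+/*!
+ * Usage sketch (illustrative only, not part of the upstream header): pointer
+ * arithmetic is scaled by sizeof(T), mirroring built-in pointer arithmetic:
+ *
+ * \code
+ * int array[4] = {};
+ * int* p = array;
+ * boost::atomic_ref< int* > ref(p);
+ * ref.fetch_add(2);  // advances p by 2 elements (2 * sizeof(int) bytes), returns the old pointer
+ * --ref;             // pre-decrement returns the new pointer (&array[1])
+ * \endcode
+ */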
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_ATOMIC_REF_IMPL_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_cast.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_cast.hpp
index 10d165e7c5..c5387f461a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_cast.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_cast.hpp
@@ -5,7 +5,7 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2013 - 2018 Andrey Semashev
+ * Copyright (c) 2013-2018, 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/detail/bitwise_cast.hpp
@@ -21,43 +21,125 @@
#include <boost/atomic/detail/addressof.hpp>
#include <boost/atomic/detail/string_ops.hpp>
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#include <boost/atomic/detail/type_traits/has_unique_object_representations.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
+#if !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
+
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_bit_cast)
+#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) && defined(BOOST_MSVC) && BOOST_MSVC >= 1926
+#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
+#endif
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
+
+#if defined(BOOST_NO_CXX11_CONSTEXPR) || !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST)
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST BOOST_CONSTEXPR
+#else
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST
+#endif
+
+#if defined(BOOST_GCC) && BOOST_GCC >= 80000
+#pragma GCC diagnostic push
+// Suppresses "copying an object of non-trivial type X from an array of Y" warnings.
+// These are benign here because we only memcpy objects that are trivially copyable.
+#pragma GCC diagnostic ignored "-Wclass-memaccess"
+#endif
+
namespace boost {
namespace atomics {
namespace detail {
-template< std::size_t FromSize, typename To >
-BOOST_FORCEINLINE void clear_padding(To& to, atomics::detail::true_type) BOOST_NOEXCEPT
+template< std::size_t ValueSize, typename To >
+BOOST_FORCEINLINE void clear_tail_padding_bits(To& to, atomics::detail::true_type) BOOST_NOEXCEPT
{
- BOOST_ATOMIC_DETAIL_MEMSET(reinterpret_cast< unsigned char* >(atomics::detail::addressof(to)) + FromSize, 0, sizeof(To) - FromSize);
+ BOOST_ATOMIC_DETAIL_MEMSET(reinterpret_cast< unsigned char* >(atomics::detail::addressof(to)) + ValueSize, 0, sizeof(To) - ValueSize);
}
-template< std::size_t FromSize, typename To >
-BOOST_FORCEINLINE void clear_padding(To&, atomics::detail::false_type) BOOST_NOEXCEPT
+template< std::size_t ValueSize, typename To >
+BOOST_FORCEINLINE void clear_tail_padding_bits(To&, atomics::detail::false_type) BOOST_NOEXCEPT
{
}
-template< typename To, std::size_t FromSize, typename From >
-BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
+template< std::size_t ValueSize, typename To >
+BOOST_FORCEINLINE void clear_tail_padding_bits(To& to) BOOST_NOEXCEPT
+{
+ atomics::detail::clear_tail_padding_bits< ValueSize >(to, atomics::detail::integral_constant< bool, ValueSize < sizeof(To) >());
+}
+
+template< typename To, std::size_t FromValueSize, typename From >
+BOOST_FORCEINLINE To bitwise_cast_memcpy(From const& from) BOOST_NOEXCEPT
{
To to;
+#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+ From from2(from);
+ BOOST_ATOMIC_DETAIL_CLEAR_PADDING(atomics::detail::addressof(from2));
+ BOOST_ATOMIC_DETAIL_MEMCPY
+ (
+ atomics::detail::addressof(to),
+ atomics::detail::addressof(from2),
+ (FromValueSize < sizeof(To) ? FromValueSize : sizeof(To))
+ );
+#else
BOOST_ATOMIC_DETAIL_MEMCPY
(
atomics::detail::addressof(to),
atomics::detail::addressof(from),
- (FromSize < sizeof(To) ? FromSize : sizeof(To))
+ (FromValueSize < sizeof(To) ? FromValueSize : sizeof(To))
);
- atomics::detail::clear_padding< FromSize >(to, atomics::detail::integral_constant< bool, FromSize < sizeof(To) >());
+#endif
+ atomics::detail::clear_tail_padding_bits< FromValueSize >(to);
return to;
}
-template< typename To, typename From >
+#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+
+template< typename To, std::size_t FromValueSize, typename From >
+BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast_impl(From const& from, atomics::detail::true_type) BOOST_NOEXCEPT
+{
+ // This implementation is only called when the From type has no padding and From and To have the same size
+ return BOOST_ATOMIC_DETAIL_BIT_CAST(To, from);
+}
+
+template< typename To, std::size_t FromValueSize, typename From >
+BOOST_FORCEINLINE To bitwise_cast_impl(From const& from, atomics::detail::false_type) BOOST_NOEXCEPT
+{
+ return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
+}
+
+template< typename To, std::size_t FromValueSize, typename From >
+BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) BOOST_NOEXCEPT
+{
+ return atomics::detail::bitwise_cast_impl< To, FromValueSize >(from, atomics::detail::integral_constant< bool,
+ FromValueSize == sizeof(To) && atomics::detail::has_unique_object_representations< From >::value >());
+}
+
+#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+
+template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
+ return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
+}
+
+#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+
+//! Converts the source object to the target type, possibly padding or truncating it on the right, and clears any padding bits (if supported by the compiler). Value bits are preserved unchanged.
+template< typename To, typename From >
+BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) BOOST_NOEXCEPT
+{
return atomics::detail::bitwise_cast< To, sizeof(From) >(from);
}
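+
+/*!
+ * Usage sketch (illustrative only): bitwise_cast is an internal helper, not public API;
+ * it reinterprets the value bits of one trivially copyable type as another, e.g. to
+ * inspect the bit pattern of a float:
+ *
+ * \code
+ * #include <boost/cstdint.hpp>
+ *
+ * float f = 1.0f;
+ * boost::uint32_t bits = boost::atomics::detail::bitwise_cast< boost::uint32_t >(f);
+ * // bits == 0x3F800000 on an IEEE 754 platform
+ * \endcode
+ */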
@@ -65,4 +147,10 @@ BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
+#if defined(BOOST_GCC) && BOOST_GCC >= 80000
+#pragma GCC diagnostic pop
+#endif
+
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_fp_cast.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_fp_cast.hpp
index a74b20b972..3f0ede8a3d 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_fp_cast.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/bitwise_fp_cast.hpp
@@ -3,7 +3,7 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2018 Andrey Semashev
+ * Copyright (c) 2018, 2021 Andrey Semashev
*/
/*!
* \file atomic/detail/bitwise_fp_cast.hpp
@@ -18,6 +18,10 @@
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/float_sizes.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#endif
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -31,17 +35,17 @@ namespace detail {
* \brief The type trait returns the size of the value of the specified floating point type
*
* This size may be less than <tt>sizeof(T)</tt> if the implementation uses padding bytes for a particular FP type. This is
- * often the case with 80-bit extended double, which is stored in 12 or 16 bytes with padding filled with garbage.
+ * often the case with 80-bit extended double, whose value occupies the initial 10 bytes of a 12 or 16 byte object, with the tail padding filled with garbage.
*/
template< typename T >
-struct value_sizeof
+struct value_size_of
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = sizeof(T);
};
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
template< >
-struct value_sizeof< float >
+struct value_size_of< float >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE;
};
@@ -49,7 +53,7 @@ struct value_sizeof< float >
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
template< >
-struct value_sizeof< double >
+struct value_size_of< double >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE;
};
@@ -57,30 +61,58 @@ struct value_sizeof< double >
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
template< >
-struct value_sizeof< long double >
+struct value_size_of< long double >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE;
};
#endif
template< typename T >
-struct value_sizeof< const T > : value_sizeof< T > {};
+struct value_size_of< const T > : value_size_of< T > {};
template< typename T >
-struct value_sizeof< volatile T > : value_sizeof< T > {};
+struct value_size_of< volatile T > : value_size_of< T > {};
template< typename T >
-struct value_sizeof< const volatile T > : value_sizeof< T > {};
+struct value_size_of< const volatile T > : value_size_of< T > {};
+
+
+#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
+// BOOST_ATOMIC_DETAIL_CLEAR_PADDING, which is used in bitwise_cast, clears the tail padding bits in the source object,
+// so we can pass the full object size here and avoid redundantly zeroing the tail padding ourselves.
+#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) sizeof(x)
+#else
+#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) atomics::detail::value_size_of< x >::value
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
template< typename To, typename From >
-BOOST_FORCEINLINE To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
+BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< To, atomics::detail::value_sizeof< From >::value >(from);
+ // For floating point types, has_unique_object_representations is typically false even if the type contains no padding bits.
+ // Instead, we rely on our detection of the actual value size to select the constexpr bit_cast implementation when possible,
+ // assuming that the floating point value bits are contiguous.
+ return atomics::detail::bitwise_cast_impl< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from, atomics::detail::integral_constant< bool,
+ atomics::detail::value_size_of< From >::value == sizeof(From) && atomics::detail::value_size_of< From >::value == sizeof(To) >());
}
+#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
+
+//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
+template< typename To, typename From >
+BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
+{
+ return atomics::detail::bitwise_cast< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from);
+}
+
+#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
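+
+/*!
+ * Usage sketch (illustrative only): bitwise_fp_cast is an internal helper, not public
+ * API. Unlike plain bitwise_cast, it copies only the detected value bytes, which matters
+ * for 80-bit long double, where the value occupies 10 bytes of a 12 or 16 byte object:
+ *
+ * \code
+ * #include <boost/cstdint.hpp>
+ *
+ * double d = 1.0;
+ * boost::uint64_t bits = boost::atomics::detail::bitwise_fp_cast< boost::uint64_t >(d);
+ * // bits == 0x3FF0000000000000 on an IEEE 754 platform
+ * \endcode
+ */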
+
} // namespace detail
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/capabilities.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/capabilities.hpp
new file mode 100644
index 0000000000..825bfd118e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/capabilities.hpp
@@ -0,0 +1,217 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/capabilities.hpp
+ *
+ * This header defines core feature capabilities macros.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+#include <boost/atomic/detail/float_sizes.hpp>
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/caps_)
+#elif defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
+#define BOOST_ATOMIC_INT8_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
+#define BOOST_ATOMIC_INT16_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_INT32_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
+#define BOOST_ATOMIC_INT64_LOCK_FREE 0
+#endif
+
+#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_INT128_LOCK_FREE 0
+#endif
+
+
+#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
+#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_CHAR8_T_LOCK_FREE
+#define BOOST_ATOMIC_CHAR8_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_INT_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LONG_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
+#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
+#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#else
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
+#endif
+#endif
+
+#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
+
+#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
+// We store bools in 1-byte storage in all backends
+#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
+// atomic_flag uses 4-byte storage
+#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#endif
+
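+// Usage sketch (illustrative only): the macros above follow the C++11
+// ATOMIC_*_LOCK_FREE convention (0 - never lock-free, 1 - sometimes lock-free,
+// 2 - always lock-free), so they can drive compile-time dispatch, e.g.:
+//
+// #if BOOST_ATOMIC_INT64_LOCK_FREE == 2
+// // can rely on always-lock-free 64-bit atomics on this target
+// #endif
+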
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+#if !defined(BOOST_ATOMIC_FLOAT_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
+#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
+#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
+#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT <= 16
+#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#else
+#define BOOST_ATOMIC_FLOAT_LOCK_FREE 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
+#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
+#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
+#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE <= 16
+#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#else
+#define BOOST_ATOMIC_DOUBLE_LOCK_FREE 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
+#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
+#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
+#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE <= 16
+#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE 0
+#endif
+#endif
+
+#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+#ifndef BOOST_ATOMIC_THREAD_FENCE
+#define BOOST_ATOMIC_THREAD_FENCE 0
+#endif
+
+#ifndef BOOST_ATOMIC_SIGNAL_FENCE
+#define BOOST_ATOMIC_SIGNAL_FENCE 0
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp
new file mode 100644
index 0000000000..390ccd2e03
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp
@@ -0,0 +1,46 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_arch_gcc_aarch32.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__ARMEL__) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ (defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
+ defined(BOOST_WINDOWS)
+#define BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN
+#elif defined(__ARMEB__) || \
+ defined(__ARM_BIG_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
+ (defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
+#define BOOST_ATOMIC_DETAIL_AARCH32_BIG_ENDIAN
+#else
+#error "Boost.Atomic: Failed to determine AArch32 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp
new file mode 100644
index 0000000000..1579a8e8bd
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_arch_gcc_aarch64.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__AARCH64EL__) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ (defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
+ defined(BOOST_WINDOWS)
+#define BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN
+#elif defined(__AARCH64EB__) || \
+ defined(__ARM_BIG_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
+ (defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
+#define BOOST_ATOMIC_DETAIL_AARCH64_BIG_ENDIAN
+#else
+#error "Boost.Atomic: Failed to determine AArch64 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
+#endif
+
+#if defined(__ARM_FEATURE_ATOMICS)
+// ARMv8.1 added the Large System Extensions (LSE), which include cas, swp, and a number of other read-modify-write instructions
+#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE
+#endif
+
+#if defined(__ARM_FEATURE_COMPLEX)
+// ARMv8.3 added the Release Consistent processor consistent (RCpc) memory model, which includes ldapr and similar instructions.
+// Unfortunately, there seems to be no dedicated __ARM_FEATURE macro for this, so we use __ARM_FEATURE_COMPLEX, which is also defined starting with ARMv8.3.
+#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_alpha.hpp
index 861432f58a..a73a7c1c71 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_alpha.hpp
@@ -8,13 +8,13 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/caps_gcc_alpha.hpp
+ * \file atomic/detail/caps_arch_gcc_alpha.hpp
*
* This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -31,4 +31,4 @@
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_arm.hpp
index 6d0c338622..8c697c9760 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_arm.hpp
@@ -3,16 +3,20 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2017 Andrey Semashev
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * ARM Code by Phil Endecott, based on other architectures.
+ * Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
- * \file atomic/detail/hwcaps_gcc_arm.hpp
+ * \file atomic/detail/caps_arch_gcc_arm.hpp
*
- * This header defines hardware capabilities macros for ARM
+ * This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
@@ -21,7 +25,21 @@
#pragma once
#endif
-#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+#if defined(__ARMEL__) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ (defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
+ defined(BOOST_WINDOWS)
+#define BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN
+#elif defined(__ARMEB__) || \
+ defined(__ARM_BIG_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
+ (defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
+#define BOOST_ATOMIC_DETAIL_ARM_BIG_ENDIAN
+#else
+#error "Boost.Atomic: Failed to determine ARM endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
+#endif
+
+#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
#if BOOST_ATOMIC_DETAIL_ARM_ARCH > 6
// ARMv7 and later have dmb instruction
@@ -62,6 +80,17 @@
#endif // defined(__ARM_FEATURE_LDREX)
-#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
-#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
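
The endianness probe folded into this header has to work across several compiler conventions; a standalone sketch of the same pattern, assuming a GCC-like compiler (DEMO_LITTLE_ENDIAN is a hypothetical name, not a Boost macro):

    #include <cstdio>

    #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
        __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    #define DEMO_LITTLE_ENDIAN 1
    #else
    #define DEMO_LITTLE_ENDIAN 0
    #endif

    int main()
    {
        std::printf("little-endian: %d\n", DEMO_LITTLE_ENDIAN);
        return 0;
    }
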
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_ppc.hpp
index 2ec1e327a7..a4665e67ad 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_ppc.hpp
@@ -3,16 +3,18 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2017 Andrey Semashev
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/hwcaps_gcc_ppc.hpp
+ * \file atomic/detail/caps_arch_gcc_ppc.hpp
*
- * This header defines hardware capabilities macros for PowerPC
+ * This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -39,4 +41,15 @@
#endif // defined(__POWERPC__) || defined(__PPC__)
-#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_sparc.hpp
index 5806684926..9b4930194e 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_sparc.hpp
@@ -8,13 +8,13 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/caps_gcc_sparc.hpp
+ * \file atomic/detail/caps_arch_gcc_sparc.hpp
*
* This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -31,4 +31,4 @@
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_x86.hpp
index 91a1aee3aa..b705fdc939 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_gcc_x86.hpp
@@ -3,16 +3,18 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2017 Andrey Semashev
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/hwcaps_gcc_x86.hpp
+ * \file atomic/detail/caps_arch_gcc_x86.hpp
*
- * This header defines hardware capabilities macros for x86
+ * This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -55,4 +57,18 @@
#endif // defined(__GNUC__)
-#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_arm.hpp
index 6b3c61fb3e..3cfb99d74e 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_arm.hpp
@@ -8,13 +8,13 @@
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/caps_msvc_arm.hpp
+ * \file atomic/detail/caps_arch_msvc_arm.hpp
*
* This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -31,4 +31,4 @@
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
-#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_x86.hpp
index 2ee4c92111..87b5cb0f83 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_arch_msvc_x86.hpp
@@ -8,13 +8,13 @@
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/caps_msvc_x86.hpp
+ * \file atomic/detail/caps_arch_msvc_x86.hpp
*
* This header defines feature capabilities macros
*/
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -26,9 +26,15 @@
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
-#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#if defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#if defined(__clang__)
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
+#elif _MSC_VER >= 1500
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+#endif
#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
// Use mfence only if SSE2 is available
@@ -43,7 +49,7 @@
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
@@ -52,4 +58,4 @@
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
-#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
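
The new clang branch above reflects that clang-cl only advertises a 16-byte compare-and-swap when it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16. What the CMPXCHG16B capability ultimately enables is a double-width CAS; a hedged sketch using the MSVC x64 intrinsic (not the Boost implementation):

    #include <intrin.h>

    // x64 only; dst must be 16-byte aligned, as cmpxchg16b requires.
    bool cas128(__int64 volatile* dst, __int64* expected, __int64 hi, __int64 lo)
    {
        // Nonzero on success; on failure expected[0..1] receive the
        // current contents of *dst.
        return _InterlockedCompareExchange128(dst, hi, lo, expected) != 0;
    }
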
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp
deleted file mode 100644
index a26ea56ee5..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2009 Phil Endecott
- * Copyright (c) 2013 Tim Blechmann
- * ARM Code by Phil Endecott, based on other architectures.
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/caps_gcc_arm.hpp
- *
- * This header defines feature capabilities macros
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
-
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#define BOOST_ATOMIC_INT8_LOCK_FREE 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE 2
-#define BOOST_ATOMIC_INT32_LOCK_FREE 2
-#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
-#define BOOST_ATOMIC_INT64_LOCK_FREE 2
-#endif
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-
-#define BOOST_ATOMIC_THREAD_FENCE 2
-#define BOOST_ATOMIC_SIGNAL_FENCE 2
-
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp
index 3b518cf49c..2bd7ab46ed 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp
@@ -3,7 +3,7 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_atomic.hpp
@@ -16,118 +16,143 @@
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
-#if defined(__i386__) || defined(__x86_64__)
-#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
-#elif defined(__arm__)
-#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
-#elif defined(__POWERPC__) || defined(__PPC__)
-#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
-#define BOOST_ATOMIC_INT128_LOCK_FREE 2
-#else
-#define BOOST_ATOMIC_INT128_LOCK_FREE 0
-#endif
+// Translate type-based lock-free macros to size-based ones
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
-#if (__GCC_ATOMIC_LLONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8)
-#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
-#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE 0
#endif
-#if (__GCC_ATOMIC_LONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8)
-#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
-#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE 0
#endif
-#if __GCC_ATOMIC_INT_LOCK_FREE == 2
-#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
-#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
#endif
-#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2
-#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 16
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 16
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 16
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 16
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
-#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
#endif
-#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2
-#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
-#else
-#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+// On x86-64, clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16:
+// https://bugs.llvm.org/show_bug.cgi?id=19149
+// Another problem exists with gcc 7 and later, as it requires linking with libatomic to use 16-byte intrinsics:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
+// Both clang and gcc do generate cmpxchg16b for __sync_val_compare_and_swap though.
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) &&\
+ (\
+ (defined(BOOST_CLANG) && (__clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 5))) ||\
+ (defined(BOOST_GCC) && BOOST_GCC >= 70000)\
+ )
+#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
#endif
-#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-#else
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
+// On 32-bit x86, there is a clang bug for 64-bit atomics: https://bugs.llvm.org/show_bug.cgi?id=19355. The compiler defines
+// __GCC_ATOMIC_LLONG_LOCK_FREE to 1 when the target architecture supports 64-bit atomic instructions (i.e. the value should be 2).
+// Additionally, any clang version requires linking with libatomic for 64-bit __atomic* intrinsics on x86. It does generate
+// cmpxchg8b for __sync_val_compare_and_swap though.
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && defined(BOOST_CLANG)
+#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
#endif
+// Override arch-specific macros if atomic intrinsics provide better guarantees
+#if !defined(BOOST_ATOMIC_INT128_LOCK_FREE) || (BOOST_ATOMIC_INT128_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE)
+#undef BOOST_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_INT128_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
+#endif
-#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE
-
-#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#if !defined(BOOST_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_INT128_LOCK_FREE)
+#undef BOOST_ATOMIC_INT64_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
#else
-#define BOOST_ATOMIC_INT16_LOCK_FREE 0
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#endif
#endif
-#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
-#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
-#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
-#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
-#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#if !defined(BOOST_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_INT64_LOCK_FREE)
+#undef BOOST_ATOMIC_INT32_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE
#else
-#define BOOST_ATOMIC_INT32_LOCK_FREE 0
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#endif
#endif
-#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
-#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
-#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
-#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
-#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#if !defined(BOOST_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_INT32_LOCK_FREE)
+#undef BOOST_ATOMIC_INT16_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE
#else
-#define BOOST_ATOMIC_INT64_LOCK_FREE 0
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#endif
#endif
-
-#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
-#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#if !defined(BOOST_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_INT16_LOCK_FREE)
+#undef BOOST_ATOMIC_INT8_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
+#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE
#else
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
+#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#endif
#endif
-#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
-#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#if !defined(BOOST_ATOMIC_POINTER_LOCK_FREE) || (BOOST_ATOMIC_POINTER_LOCK_FREE < __GCC_ATOMIC_POINTER_LOCK_FREE)
+#undef BOOST_ATOMIC_POINTER_LOCK_FREE
+#define BOOST_ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
+#if !defined(BOOST_ATOMIC_THREAD_FENCE) || (BOOST_ATOMIC_THREAD_FENCE < 2)
+#undef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 2
+#endif
+#if !defined(BOOST_ATOMIC_SIGNAL_FENCE) || (BOOST_ATOMIC_SIGNAL_FENCE < 2)
+#undef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 2
+#endif
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
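
The rewrite above does three things: it translates the compiler's type-based __GCC_ATOMIC_*_LOCK_FREE macros into size-based values, downgrades sizes with known compiler issues (clang 3.4's missing 128-bit __atomic intrinsics, plus the libatomic dependency of gcc 7+ for 16-byte and of clang for 64-bit intrinsics on 32-bit x86, where only the __sync builtins reliably emit cmpxchg16b/cmpxchg8b inline), and then merges the result with the arch-specific macros. The merge keeps each size at the best available level and never below the next wider size, because a wider lock-free CAS can always emulate a narrower operation. A compile-time sketch of that merge rule with hypothetical inputs (requires C++14; these are not the Boost macros):

    // Capability levels: 0 = never, 1 = sometimes, 2 = always lock-free.
    constexpr int merge_lock_free(int arch, int gcc, int wider)
    {
        int best = arch > gcc ? arch : gcc; // best of the two backends
        return best > wider ? best : wider; // never below the wider size
    }

    static_assert(merge_lock_free(0, 2, 0) == 2, "__atomic backend wins");
    static_assert(merge_lock_free(2, 0, 0) == 2, "arch backend wins");
    static_assert(merge_lock_free(0, 0, 2) == 2, "inherited from wider size");
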
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp
deleted file mode 100644
index 3e20fdee45..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2013 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/caps_gcc_ppc.hpp
- *
- * This header defines feature capabilities macros
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
-
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#define BOOST_ATOMIC_INT8_LOCK_FREE 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE 2
-#define BOOST_ATOMIC_INT32_LOCK_FREE 2
-#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
-#define BOOST_ATOMIC_INT64_LOCK_FREE 2
-#endif
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-
-#define BOOST_ATOMIC_THREAD_FENCE 2
-#define BOOST_ATOMIC_SIGNAL_FENCE 2
-
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp
index ffbe605a1a..43065fee9a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp
@@ -17,13 +17,6 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
-#if defined(__i386__) || defined(__x86_64__)
-#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
-#elif defined(__arm__)
-#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
-#elif defined(__POWERPC__) || defined(__PPC__)
-#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
-#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp
deleted file mode 100644
index 70c64628af..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2013 - 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/caps_gcc_x86.hpp
- *
- * This header defines feature capabilities macros
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
-
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#define BOOST_ATOMIC_INT8_LOCK_FREE 2
-#define BOOST_ATOMIC_INT16_LOCK_FREE 2
-#define BOOST_ATOMIC_INT32_LOCK_FREE 2
-#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
-#define BOOST_ATOMIC_INT64_LOCK_FREE 2
-#endif
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
-#define BOOST_ATOMIC_INT128_LOCK_FREE 2
-#endif
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-
-#define BOOST_ATOMIC_THREAD_FENCE 2
-#define BOOST_ATOMIC_SIGNAL_FENCE 2
-
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/cas_based_exchange.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/cas_based_exchange.hpp
new file mode 100644
index 0000000000..ba74dd2efb
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/cas_based_exchange.hpp
@@ -0,0 +1,50 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/cas_based_exchange.hpp
+ *
+ * This header contains a CAS-based implementation of the exchange operation.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct cas_based_exchange :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
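
cas_based_exchange layers an exchange() over any base that only provides compare_exchange_weak. The same loop in standalone form, sketched with std::atomic for illustration (a relaxed load stands in for non_atomic_load):

    #include <atomic>

    template< typename T >
    T cas_exchange(std::atomic< T >& storage, T desired)
    {
        T old_val = storage.load(std::memory_order_relaxed);
        // compare_exchange_weak refreshes old_val on failure, so the loop
        // ends as soon as one CAS succeeds and returns the prior value.
        while (!storage.compare_exchange_weak(old_val, desired,
                   std::memory_order_seq_cst, std::memory_order_relaxed))
        {
        }
        return old_val;
    }
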
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/classify.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/classify.hpp
new file mode 100644
index 0000000000..920e9cf109
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/classify.hpp
@@ -0,0 +1,90 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020-2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/classify.hpp
+ *
+ * This header contains type traits for type classification.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/type_traits/is_enum.hpp>
+#include <boost/atomic/detail/type_traits/is_integral.hpp>
+#include <boost/atomic/detail/type_traits/is_function.hpp>
+#include <boost/atomic/detail/type_traits/is_floating_point.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T, bool IsFunction = atomics::detail::is_function< T >::value >
+struct classify_pointer
+{
+ typedef void* type;
+};
+
+template< typename T >
+struct classify_pointer< T, true >
+{
+ typedef void type;
+};
+
+template<
+ typename T,
+ bool IsInt = atomics::detail::is_integral< T >::value,
+ bool IsFloat = atomics::detail::is_floating_point< T >::value,
+ bool IsEnum = atomics::detail::is_enum< T >::value
+>
+struct classify
+{
+ typedef void type;
+};
+
+template< typename T >
+struct classify< T, true, false, false > { typedef int type; };
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+template< typename T >
+struct classify< T, false, true, false > { typedef float type; };
+#endif
+
+template< typename T >
+struct classify< T, false, false, true > { typedef const int type; };
+
+template< typename T >
+struct classify< T*, false, false, false > { typedef typename classify_pointer< T >::type type; };
+
+template< >
+struct classify< void*, false, false, false > { typedef void type; };
+
+template< >
+struct classify< const void*, false, false, false > { typedef void type; };
+
+template< >
+struct classify< volatile void*, false, false, false > { typedef void type; };
+
+template< >
+struct classify< const volatile void*, false, false, false > { typedef void type; };
+
+template< typename T, typename U >
+struct classify< T U::*, false, false, false > { typedef void type; };
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
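
classify<> maps a value type to a small tag type: int for integers, float for floating point, const int for enums, void* for object pointers, and void for everything else (void pointers, function pointers, member pointers); boost::atomic<> then picks its implementation by specializing on the tag. A hedged mini-version of the dispatch using std traits (classify_demo and its tags are illustrative names, not the Boost ones):

    #include <type_traits>

    template< typename T, typename = void >
    struct classify_demo { typedef void type; };          // generic fallback

    template< typename T >
    struct classify_demo< T,
        typename std::enable_if< std::is_integral< T >::value >::type >
    { typedef int type; };                                // integer operations

    template< typename T >
    struct classify_demo< T*, void >
    { typedef void* type; };                              // pointer arithmetic

    struct opaque { };
    static_assert(std::is_same< classify_demo< long >::type, int >::value, "int tag");
    static_assert(std::is_same< classify_demo< long* >::type, void* >::value, "pointer tag");
    static_assert(std::is_same< classify_demo< opaque >::type, void >::value, "generic tag");
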
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/config.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/config.hpp
index d2a6afd203..b2031d8d43 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/config.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/config.hpp
@@ -4,7 +4,7 @@
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2012 Hartmut Kaiser
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014-2018, 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/detail/config.hpp
@@ -36,24 +36,49 @@
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA
#endif
-#if (defined(__i386__) || defined(__x86_64__)) && (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40500) || defined(__SUNPRO_CC))
+#if (defined(__i386__) || defined(__x86_64__)) && (defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC < 40500) || defined(__SUNPRO_CC))
// This macro indicates that the compiler does not support allocating eax:edx or rax:rdx register pairs ("A") in asm blocks
#define BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS
#endif
-#if defined(__i386__) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 50100))
+#if defined(__i386__) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC >= 50100))
// This macro indicates that asm blocks should preserve ebx value unchanged. Some compilers are able to maintain ebx themselves
// around the asm blocks. For those compilers we don't need to save/restore ebx in asm blocks.
#define BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX
#endif
#if defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
-#if !(defined(BOOST_LIBSTDCXX11) && (BOOST_LIBSTDCXX_VERSION+0) >= 40700) /* libstdc++ from gcc >= 4.7 in C++11 mode */
+#if !(defined(BOOST_LIBSTDCXX11) && BOOST_LIBSTDCXX_VERSION >= 40700) /* libstdc++ from gcc >= 4.7 in C++11 mode */
// This macro indicates that there is not even a basic <type_traits> standard header that is sufficient for most Boost.Atomic needs.
#define BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS
#endif
#endif // defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+#if defined(BOOST_NO_CXX11_ALIGNAS) ||\
+ (defined(BOOST_GCC) && BOOST_GCC < 40900) ||\
+ (defined(BOOST_MSVC) && BOOST_MSVC < 1910 && defined(_M_IX86))
+// gcc prior to 4.9 doesn't support alignas with a constant expression as an argument.
+// MSVC 14.0 does support alignas, but in 32-bit mode emits "error C2719: formal parameter with requested alignment of N won't be aligned" for N > 4,
+// when aligned types are used in function arguments, even though the std::max_align_t type has alignment of 8.
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS
+#endif
+
+#if defined(BOOST_NO_CXX11_CONSTEXPR) || (defined(BOOST_GCC) && BOOST_GCC < 40800)
+// This macro indicates that the compiler doesn't support constexpr constructors that initialize one member
+// of an anonymous union member of the class.
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT
+#endif
+
+#if (defined(_MSC_VER) && (_MSC_VER < 1914 || _MSVC_LANG < 201703)) || (!defined(_MSC_VER) && (!defined(__cpp_deduction_guides) || __cpp_deduction_guides < 201606))
+#define BOOST_ATOMIC_DETAIL_NO_CXX17_DEDUCTION_GUIDES
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT)
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT BOOST_CONSTEXPR
+#else
+#define BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT
+#endif
+
// Enable pointer/reference casts between storage and value when possible.
// Note: Despite that MSVC does not employ strict aliasing rules for optimizations
// and does not require an explicit markup for types that may alias, we still don't
@@ -70,11 +95,31 @@
#define BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS
#endif
+#if defined(BOOST_INTEL) || (defined(BOOST_GCC) && BOOST_GCC < 40700) ||\
+ (defined(BOOST_CLANG) && !defined(__apple_build_version__) && (__clang_major__ * 100 + __clang_minor__) < 302) ||\
+ (defined(__clang__) && defined(__apple_build_version__) && (__clang_major__ * 100 + __clang_minor__) < 402)
+// Intel compiler (at least 18.0 update 1) breaks if noexcept specification is used in defaulted function declarations:
+// error: the default constructor of "boost::atomics::atomic<T>" cannot be referenced -- it is a deleted function
+// GCC 4.6 doesn't seem to support that either. Clang 3.1 deduces wrong noexcept for the defaulted function and fails as well.
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL BOOST_NOEXCEPT
+#else
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL BOOST_NOEXCEPT
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
+#endif
+
#if defined(__has_builtin)
#if __has_builtin(__builtin_constant_p)
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
-#elif defined(__GNUC__)
+#if __has_builtin(__builtin_clear_padding)
+#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_clear_padding(x)
+#elif __has_builtin(__builtin_zero_non_value_bits)
+#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT) && defined(__GNUC__)
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
@@ -82,7 +127,18 @@
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) false
#endif
-#if (defined(__BYTE_ORDER__) && defined(__FLOAT_WORD_ORDER__) && (__BYTE_ORDER__+0) == (__FLOAT_WORD_ORDER__+0)) ||\
+#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING) && defined(BOOST_MSVC) && BOOST_MSVC >= 1927
+// Note that as of MSVC 19.29 this intrinsic does not clear padding in unions:
+// https://developercommunity.visualstudio.com/t/__builtin_zero_non_value_bits-does-not-c/1551510
+#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING)
+#define BOOST_ATOMIC_NO_CLEAR_PADDING
+#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x)
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__FLOAT_WORD_ORDER__) && __BYTE_ORDER__ == __FLOAT_WORD_ORDER__) ||\
defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
// This macro indicates that integer and floating point endianness is the same
#define BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH
@@ -107,8 +163,8 @@
// gcc since 4.5 supports deprecated attribute with a message; older versions support the attribute without a message.
// Oracle Studio 12.4 supports deprecated attribute with a message; this is the first release that supports the attribute.
#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && (\
- (defined(__GNUC__) && ((__GNUC__ + 0) * 100 + (__GNUC_MINOR__ + 0)) >= 405) ||\
- (defined(__SUNPRO_CC) && (__SUNPRO_CC + 0) >= 0x5130))
+ (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 405) ||\
+ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5130))
#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __attribute__((deprecated(msg)))
#endif
@@ -130,21 +186,11 @@
#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg)
#endif
-// In Boost.Atomic 1.67 we changed (op)_and_test methods to return true when the result is non-zero. This would be more consistent
-// with the other names used in Boost.Atomic and the C++ standard library. Since the methods were announced as experimental and
-// the previous behavior was released only in Boost 1.66, it was decided to change the result without changing the method names.
-// By defining BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST the user has a way to highlight all uses of the affected functions so
-// that it is easier to find and update the affected code (which is typically adding or removing negation of the result). This
-// highlighting functionality is a temporary measure to help users upgrade from Boost 1.66 to newer Boost versions. It will
-// be removed eventually.
-//
-// More info at:
-// https://github.com/boostorg/atomic/issues/11
-// http://boost.2283326.n4.nabble.com/atomic-op-and-test-naming-tc4701445.html
-#if defined(BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST)
-#define BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST BOOST_ATOMIC_DETAIL_DEPRECATED("Boost.Atomic 1.67 has changed (op)_and_test result to the opposite. The functions now return true when the result is non-zero. Please, verify your use of the operation and undefine BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST.")
+// In Boost.Atomic 1.73 we deprecated atomic<>::storage() accessor in favor of atomic<>::value(). In future releases storage() will be removed.
+#if !defined(BOOST_ATOMIC_SILENCE_STORAGE_DEPRECATION)
+#define BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED BOOST_ATOMIC_DETAIL_DEPRECATED("Boost.Atomic 1.73 has deprecated atomic<>::storage() in favor of atomic<>::value() and atomic<>::storage_type in favor of atomic<>::value_type. You can define BOOST_ATOMIC_SILENCE_STORAGE_DEPRECATION to disable this warning.")
#else
-#define BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
+#define BOOST_ATOMIC_DETAIL_STORAGE_DEPRECATED
#endif
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
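
One addition above deserves a note: BOOST_ATOMIC_DETAIL_CLEAR_PADDING exists because compare_exchange compares storage byte-wise, so indeterminate padding bytes in a value type can cause spurious mismatches. A portable sketch of the manual workaround the intrinsics automate (the struct layout is an assumption about typical ABIs):

    #include <cstring>

    struct padded
    {
        char c; // typically followed by 3 padding bytes
        int i;
    };

    padded make_canonical(char c, int i)
    {
        padded v;
        std::memset(&v, 0, sizeof(v)); // what __builtin_clear_padding automates
        v.c = c;
        v.i = i;
        return v;
    }
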
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations.hpp
new file mode 100644
index 0000000000..40a10814d1
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations.hpp
@@ -0,0 +1,50 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_arch_operations.hpp
+ *
+ * This header defines core atomic operations, including the emulated version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/core_operations_emulated.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/core_arch_ops_)
+#endif
+
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Primary template that falls back to the lock-based implementation
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_arch_operations :
+ public core_operations_emulated< Size, storage_traits< Size >::alignment, Signed, Interprocess >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
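
The primary template above is the whole dispatch mechanism: backend headers pulled in via BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER add specializations for the sizes they support natively, and anything left unspecialized falls through to the lock-based emulation. The shape of that dispatch, sketched with illustrative names:

    #include <cstddef>

    template< std::size_t Size, bool Signed, bool Interprocess >
    struct demo_core_ops { /* lock-based fallback */ };

    // A backend header would contribute specializations like this one:
    template< bool Signed, bool Interprocess >
    struct demo_core_ops< 4u, Signed, Interprocess > { /* native 32-bit ops */ };
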
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations_fwd.hpp
new file mode 100644
index 0000000000..f018fe4e85
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_operations_fwd.hpp
@@ -0,0 +1,38 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_arch_operations_fwd.hpp
+ *
+ * This header contains the forward declaration of the \c core_arch_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp
new file mode 100644
index 0000000000..0e5d7b24e7
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp
@@ -0,0 +1,1121 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_arch_ops_gcc_aarch32.hpp
+ *
+ * This header contains the implementation of the \c core_arch_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/ops_gcc_aarch32_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// The ARMv8 (AArch32) instruction set is similar to ARMv7, but adds
+// lda(b/h) and ldaex(b/h/d) instructions for acquire loads and
+// stl(b/h) and stlex(b/h/d) instructions for release stores. This
+// makes explicit memory fences unnecessary for implementing the
+// majority of the atomic operations.
+//
+// ARMv8 deprecates applying "it" hints to some instructions, including
+// strex. It also deprecates "it" hints that apply to more than one of
+// the subsequent conditional instructions. This means we have to use
+// conditional jumps instead of making other instructions conditional.
+
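
As a reading aid: every read-modify-write specialization below encodes the same load-exclusive/store-exclusive retry loop (a hedged paraphrase of the asm, not Boost code):

    // 1: ld{a}ex(b/h) original, [storage]    ; load-exclusive, acquire form
    //                                        ; when the order requires it
    //    <compute result from original>      ; add/sub/and/orr/eor, ...
    //    st{l}ex(b/h) tmp, result, [storage] ; store-exclusive, release form
    //    teq tmp, #0                         ; tmp != 0: reservation lost,
    //    bne 1b                              ; retry from the load
    //
    // BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order) selects the "a"/"l"
    // mnemonic suffixes that match the requested memory_order.
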
+struct core_arch_operations_gcc_aarch32_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch32_base
+{
+ typedef typename storage_traits< 1u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlb %[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "ldab %[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "st" st_mo "exb %[tmp], %[value], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [tmp] "=&r" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)\
+ : [value] "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxtb %[expected], %[expected]\n\t"\
+ "mov %[success], #0\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 1f\n\t"\
+ "st" st_mo "exb %[success], %[desired], %[storage]\n\t"\
+ "eor %[success], %[success], #1\n\t"\
+ "1:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "r" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxtb %[expected], %[expected]\n\t"\
+ "mov %[success], #0\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 2f\n\t"\
+ "st" st_mo "exb %[success], %[desired], %[storage]\n\t"\
+ "eors %[success], %[success], #1\n\t"\
+ "beq 1b\n\t"\
+ "2:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "r" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "add %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "sub %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "and %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "orr %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "eor %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch32_base
+{
+ typedef typename storage_traits< 2u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlh %[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "ldah %[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "st" st_mo "exh %[tmp], %[value], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [tmp] "=&r" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)\
+ : [value] "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxth %[expected], %[expected]\n\t"\
+ "mov %[success], #0\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 1f\n\t"\
+ "st" st_mo "exh %[success], %[desired], %[storage]\n\t"\
+ "eor %[success], %[success], #1\n\t"\
+ "1:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "r" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxth %[expected], %[expected]\n\t"\
+ "mov %[success], #0\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 2f\n\t"\
+ "st" st_mo "exh %[success], %[desired], %[storage]\n\t"\
+ "eors %[success], %[success], #1\n\t"\
+ "beq 1b\n\t"\
+ "2:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "r" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "add %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "sub %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "and %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "orr %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "eor %[result], %[original], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch32_base
+{
+ typedef typename storage_traits< 4u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stl %[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "lda %[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "st" st_mo "ex %[tmp], %[value], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [tmp] "=&r" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)\
+ : [value] "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %[success], #0\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 1f\n\t"\
+ "st" st_mo "ex %[success], %[desired], %[storage]\n\t"\
+ "eor %[success], %[success], #1\n\t"\
+ "1:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "Ir" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %[success], #0\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "cmp %[original], %[expected]\n\t"\
+ "bne 2f\n\t"\
+ "st" st_mo "ex %[success], %[desired], %[storage]\n\t"\
+ "eors %[success], %[success], #1\n\t"\
+ "beq 1b\n\t"\
+ "2:\n\t"\
+ : [original] "=&r" (original), [success] "=&r" (success), [storage] "+Q" (storage)\
+ : [expected] "Ir" (expected), [desired] "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "add %[result], %[original], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "sub %[result], %[original], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "and %[result], %[original], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "orr %[result], %[original], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "eor %[result], %[original], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+
+// Unlike the 32-bit case, 64-bit loads and stores must use ldrexd/strexd; other instructions
+// would result in a non-atomic sequence of 32-bit or finer-grained accesses.
+// See "ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile", Section E2.2 "Atomicity in the ARM architecture".
+// Section E2.3.7 "Memory barriers", subsection "Load-Acquire, Store-Release" extends the atomicity guarantees given for ldrexd/strexd
+// to the newer ldaexd/stlexd instructions with acquire/release semantics.
+//
+// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values. In order to pass the 64-bit operands
+// to/from asm blocks, we rely on an undocumented gcc feature: the lower half (Rt) of the operand is accessible normally, via the
+// numbered placeholder (e.g. %0), and the upper half (Rt2) is accessible via the same placeholder with an 'H' after the '%' sign (e.g. %H0).
+// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/
+//
+// The ldrexd and strexd instructions operate on pairs of registers: each load reads two integers from memory, in successive
+// address order, into the first and second registers of the pair, respectively, and each store similarly writes two integers.
+// The order of these integers does not depend on the active endianness mode (although the byte order within the integers themselves
+// obviously does depend on endianness). This means we need to account for the current endianness mode ourselves, where it matters.
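+//
+// As an illustrative sketch (not part of the library), assuming a GCC-compatible compiler targeting AArch32
+// with ldrexd available, a relaxed standalone 64-bit load could be written like this; %0 names the Rt half of
+// the register pair and %H0 names the Rt2 half:
+//
+//   boost::uint64_t load64_relaxed(boost::uint64_t const volatile& storage)
+//   {
+//       boost::uint64_t value;
+//       __asm__ __volatile__
+//       (
+//           "ldrexd %0, %H0, %1\n\t" // loads into the {Rt, Rt2} pair backing %0
+//           : "=&r" (value)          // %0: 64-bit result allocated to a register pair
+//           : "Q" (storage)          // %1: memory operand
+//       );
+//       return value;
+//   }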
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch32_base
+{
+ typedef typename storage_traits< 8u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ exchange(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "ldaexd %0, %H0, %1\n\t"
+ : "=&r" (original) // %0
+ : "Q" (storage) // %1
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "ldrexd %0, %H0, %1\n\t"
+ : "=&r" (original) // %0
+ : "Q" (storage) // %1
+ );
+ }
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %1, %H1, %2\n\t"\
+ "st" st_mo "exd %0, %3, %H3, %2\n\t"\
+ "teq %0, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (tmp), "=&r" (original), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %1, #0\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "cmp %0, %3\n\t"\
+ "it eq\n\t"\
+ "cmpeq %H0, %H3\n\t"\
+ "bne 1f\n\t"\
+ "st" st_mo "exd %1, %4, %H4, %2\n\t"\
+ "eor %1, %1, #1\n\t"\
+ "1:\n\t"\
+ : "=&r" (original), "=&r" (success), "+Q" (storage)\
+ : "r" (expected), "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ bool success;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %1, #0\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "cmp %0, %3\n\t"\
+ "it eq\n\t"\
+ "cmpeq %H0, %H3\n\t"\
+ "bne 2f\n\t"\
+ "st" st_mo "exd %1, %4, %H4, %2\n\t"\
+ "eors %1, %1, #1\n\t"\
+ "beq 1b\n\t"\
+ "2:\n\t"\
+ : "=&r" (original), "=&r" (success), "+Q" (storage)\
+ : "r" (expected), "r" (desired)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ expected = original;
+
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
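+ // The 64-bit addition is performed on the register pair with adds/adc; the ASM_ARG_LO/HI macros
+ // pick the low/high halves of the pair according to the current endianness mode.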
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "adds " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(3) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(4) "\n\t"\
+ "adc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(3) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(4) "\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "subs " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(3) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(4) "\n\t"\
+ "sbc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(3) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(4) "\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "and %3, %0, %4\n\t"\
+ "and %H3, %H0, %H4\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "orr %3, %0, %4\n\t"\
+ "orr %H3, %H0, %H4\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "eor %3, %0, %4\n\t"\
+ "eor %H3, %H0, %H4\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp
new file mode 100644
index 0000000000..192cf6e35d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp
@@ -0,0 +1,1909 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_arch_ops_gcc_aarch64.hpp
+ *
+ * This header contains the implementation of the \c core_arch_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/ops_gcc_aarch64_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct core_arch_operations_gcc_aarch64_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+};
+
+// Due to bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63359 we have to explicitly specify the size of the registers
+// used in the asm blocks below: the %w prefix selects the 32-bit view of a register and %x the 64-bit one.
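+//
+// For example, "add %w[result], %w[original], %w[value]" in the 32-bit operations below operates on W (32-bit)
+// register views, while the 64-bit operations use forms such as "swp %x[value], %x[original], %[storage]" to
+// operate on the full X (64-bit) registers.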
+
+// A note about the compare_exchange implementations. Since failure_order must never include release semantics and
+// must not be stronger than success_order, we can always use success_order to select instructions. Thus, when
+// the CAS fails, only the acquire part of success_order takes effect, which may be stronger than failure_order.
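+//
+// For example, a compare_exchange call with success_order == memory_order_acq_rel and failure_order ==
+// memory_order_acquire selects the acquire/release instruction forms from success_order. On failure the
+// store is never executed, so only the acquire half takes effect, which is at least as strong as the
+// requested failure_order.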
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch64_base
+{
+ typedef typename storage_traits< 1u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlrb %w[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
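+ // With the RCpc extension, ldaprb provides the weaker load-acquire-RCpc ordering, which suffices
+ // for consume/acquire loads; seq_cst loads still use the stronger ldarb.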
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
+ if (order == memory_order_consume || order == memory_order_acquire)
+ {
+ __asm__ __volatile__
+ (
+ "ldaprb %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+#endif
+ {
+ __asm__ __volatile__
+ (
+ "ldarb %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
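+ // With LSE, a single swpb instruction performs the exchange; without it, fall back to an
+ // ldxrb/stxrb retry loop (stxrb writes 0 to %[tmp] on success).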
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "swp" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[value], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo "b %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxtb %w[expected], %w[expected]\n\t"\
+ "mov %w[success], #0\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 1f\n\t"\
+ "st" st_mo "xrb %w[success], %w[desired], %[storage]\n\t"\
+ "eor %w[success], %w[success], #1\n\t"\
+ "1:\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "r" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo "b %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxtb %w[expected], %w[expected]\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 2f\n\t"\
+ "st" st_mo "xrb %w[success], %w[desired], %[storage]\n\t"\
+ "cbnz %w[success], 1b\n\t"\
+ "2:\n\t"\
+ "cset %w[success], eq\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "r" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "add %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = -v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "sub %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = ~v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldclr" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "and %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldset" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "orr %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldeor" ld_mo st_mo "b %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "eor %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch64_base
+{
+ typedef typename storage_traits< 2u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlrh %w[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
+ if (order == memory_order_consume || order == memory_order_acquire)
+ {
+ __asm__ __volatile__
+ (
+ "ldaprh %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+#endif
+ {
+ __asm__ __volatile__
+ (
+ "ldarh %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "swp" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[value], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo "h %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxth %w[expected], %w[expected]\n\t"\
+ "mov %w[success], #0\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 1f\n\t"\
+ "st" st_mo "xrh %w[success], %w[desired], %[storage]\n\t"\
+ "eor %w[success], %w[success], #1\n\t"\
+ "1:\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "r" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo "h %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "uxth %w[expected], %w[expected]\n\t"\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 2f\n\t"\
+ "st" st_mo "xrh %w[success], %w[desired], %[storage]\n\t"\
+ "cbnz %w[success], 1b\n\t"\
+ "2:\n\t"\
+ "cset %w[success], eq\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "r" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "add %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = -v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "sub %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = ~v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldclr" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "and %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldset" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "orr %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldeor" ld_mo st_mo "h %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "eor %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch64_base
+{
+ typedef typename storage_traits< 4u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlr %w[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
+ if (order == memory_order_consume || order == memory_order_acquire)
+ {
+ __asm__ __volatile__
+ (
+ "ldapr %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+#endif
+ {
+ __asm__ __volatile__
+ (
+ "ldar %w[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "swp" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[value], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo " %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %w[success], #0\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 1f\n\t"\
+ "st" st_mo "xr %w[success], %w[desired], %[storage]\n\t"\
+ "eor %w[success], %w[success], #1\n\t"\
+ "1:\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "Ir" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo " %w[original], %w[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "cmp %w[original], %w[expected]\n\t"\
+ "b.ne 2f\n\t"\
+ "st" st_mo "xr %w[success], %w[desired], %[storage]\n\t"\
+ "cbnz %w[success], 1b\n\t"\
+ "2:\n\t"\
+ "cset %w[success], eq\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "Ir" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "add %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = -v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "sub %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = ~v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldclr" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "and %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldset" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "orr %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldeor" ld_mo st_mo " %w[value], %w[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "eor %w[result], %w[original], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
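
The BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH macro invoked after each asm block above is defined earlier in this header, outside this excerpt. It instantiates BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN with the acquire/release instruction suffixes matching the requested memory order. A minimal C++ sketch of that dispatch, assuming the upstream pattern:

    #include <boost/memory_order.hpp>

    // Hedged sketch only: ld_mo/st_mo select the acquire ("a") and release ("l")
    // forms of the exclusive instructions, e.g. ldaxr/stlxr for seq_cst and
    // plain ldxr/stxr for relaxed.
    inline void aarch64_mo_suffixes(boost::memory_order order, char const*& ld_mo, char const*& st_mo)
    {
        ld_mo = "";
        st_mo = "";
        if (order == boost::memory_order_consume || order == boost::memory_order_acquire)
            ld_mo = "a";                  // ldaxr + stxr
        else if (order == boost::memory_order_release)
            st_mo = "l";                  // ldxr + stlxr
        else if (order != boost::memory_order_relaxed)
        {
            ld_mo = "a";                  // acq_rel / seq_cst:
            st_mo = "l";                  // ldaxr + stlxr
        }
    }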
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch64_base
+{
+ typedef typename storage_traits< 8u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "stlr %x[value], %[storage]\n\t"
+ : [storage] "=Q" (storage)
+ : [value] "r" (v)
+ : "memory"
+ );
+ }
+ else
+ {
+ storage = v;
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
+ if (order == memory_order_consume || order == memory_order_acquire)
+ {
+ __asm__ __volatile__
+ (
+ "ldapr %x[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+#endif
+ {
+ __asm__ __volatile__
+ (
+ "ldar %x[value], %[storage]\n\t"
+ : [value] "=r" (v)
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ }
+ else
+ {
+ v = storage;
+ }
+
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "swp" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[value], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo " %x[original], %x[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %w[success], #0\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "cmp %x[original], %x[expected]\n\t"\
+ "b.ne 1f\n\t"\
+ "st" st_mo "xr %w[success], %x[desired], %[storage]\n\t"\
+ "eor %w[success], %w[success], #1\n\t"\
+ "1:\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "Ir" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ original = expected;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "cas" ld_mo st_mo " %x[original], %x[desired], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "+r" (original)\
+ : [desired] "r" (desired)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+ bool success = original == expected;
+#else
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "cmp %x[original], %x[expected]\n\t"\
+ "b.ne 2f\n\t"\
+ "st" st_mo "xr %w[success], %x[desired], %[storage]\n\t"\
+ "cbnz %w[success], 1b\n\t"\
+ "2:\n\t"\
+ "cset %w[success], eq\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [desired] "r" (desired), [expected] "Ir" (expected)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#endif
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "add %x[result], %x[original], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = -v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldadd" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "sub %x[result], %x[original], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+ v = ~v;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldclr" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "and %x[result], %x[original], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldset" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "orr %x[result], %x[original], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "ldeor" ld_mo st_mo " %x[value], %x[original], %[storage]\n\t"\
+ : [storage] "+Q" (storage), [original] "=r" (original)\
+ : [value] "r" (v)\
+ : "memory"\
+ );
+#else
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "eor %x[result], %x[original], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+#endif
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
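
Every non-LSE fetch_* above follows the same exclusive-load/exclusive-store retry pattern: load with ldxr, compute the new value, attempt the store with stxr, and branch back if the exclusive store failed. In portable C++ the loop is semantically a compare_exchange_weak retry loop; an illustrative sketch (not how this header is implemented):

    #include <atomic>
    #include <cstdint>

    inline uint64_t fetch_and_sketch(std::atomic<uint64_t>& storage, uint64_t v, std::memory_order order)
    {
        uint64_t original = storage.load(std::memory_order_relaxed);
        // compare_exchange_weak may fail spuriously, just as stxr may; retrying
        // mirrors the "cbnz %w[tmp], 1b" branch back to the ldxr.
        while (!storage.compare_exchange_weak(original, original & v, order, std::memory_order_relaxed))
        {
        }
        return original;
    }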
+
+// For 128-bit atomic operations we always have to use ldxp+stxp (optionally, with acquire/release semantics), even in load and store operations.
+// ARM Architecture Reference Manual Armv8, for Armv8-A architecture profile, Section B2.2.1 "Requirements for single-copy atomicity"
+// specifies that ldxp does not guarantee an atomic load, and we have to perform an ldxp+stxp loop to ensure that the loaded value
+// is consistent with a previous atomic store.
+//
+// The ldxp and stxp instructions operate on pairs of registers, meaning that each load reads two integers from memory in
+// successive address order, into the first and second registers in the pair, respectively, and each store similarly writes two integers.
+// The order of these integers does not depend on the active endianness mode (although the byte order within the integers themselves
+// obviously does depend on endianness). This means we need to account for the current endianness mode ourselves, where it matters.
+//
+// Unlike AArch32/A32 or ARMv7, ldxp/stxp do not require an adjacent even+odd register pair and accept any two distinct
+// registers. Still, it may be preferable to select adjacent registers, as 128-bit objects are represented by two adjacent
+// registers in the ABI. Unfortunately, clang 10 (and probably older versions) does not seem to support allocating register
+// pairs in asm blocks, as is done for ARMv7. For now we use a union to convert between a pair of 64-bit elements and 128-bit storage.
+
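A minimal sketch of the pair-of-registers view described above, assuming a 16-byte trivially-copyable storage type. Which array element holds the low half depends on the target endianness, which is why the arithmetic operations below pick their operands through the BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO/_HI index macros (defined earlier in this header) rather than hard-coding 0 and 1:

    #include <cstdint>

    struct storage128_sketch
    {
        uint64_t halves[2u]; // stand-in for storage_traits< 16u >::type
    };

    union storage_union_sketch
    {
        storage128_sketch as_storage; // the whole 128-bit value
        uint64_t as_uint64[2u];       // the two registers ldxp/stxp operate on
    };
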
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 16u, Signed, Interprocess > :
+ public core_arch_operations_gcc_aarch64_base
+{
+ typedef typename storage_traits< 16u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ // Union to convert between two 64-bit registers and a 128-bit storage
+ union storage_union
+ {
+ storage_type as_storage;
+ uint64_t as_uint64[2u];
+ };
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ exchange(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union v;
+ uint32_t tmp;
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldaxp %x[value_0], %x[value_1], %[storage]\n\t"
+ "stxp %w[tmp], %x[value_0], %x[value_1], %[storage]\n\t"
+ "cbnz %w[tmp], 1b\n\t"
+ : [tmp] "=&r" (tmp), [value_0] "=&r" (v.as_uint64[0u]), [value_1] "=&r" (v.as_uint64[1u])
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldxp %x[value_0], %x[value_1], %[storage]\n\t"
+ "stxp %w[tmp], %x[value_0], %x[value_1], %[storage]\n\t"
+ "cbnz %w[tmp], 1b\n\t"
+ : [tmp] "=&r" (tmp), [value_0] "=&r" (v.as_uint64[0u]), [value_1] "=&r" (v.as_uint64[1u])
+ : [storage] "Q" (storage)
+ : "memory"
+ );
+ }
+
+ return v.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[value_0], %x[value_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u])\
+ : [value_0] "r" (value.as_uint64[0u]), [value_1] "r" (value.as_uint64[1u])\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union e = { expected };
+ storage_union d = { desired };
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "mov %w[success], #0\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "cmp %x[original_0], %x[expected_0]\n\t"\
+ "ccmp %x[original_1], %x[expected_1], #0, eq\n\t"\
+ "b.ne 1f\n\t"\
+ "st" st_mo "xp %w[success], %x[desired_0], %x[desired_1], %[storage]\n\t"\
+ "eor %w[success], %w[success], #1\n\t"\
+ "1:\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u])\
+ : [desired_0] "r" (d.as_uint64[0u]), [desired_1] "r" (d.as_uint64[1u]), [expected_0] "r" (e.as_uint64[0u]), [expected_1] "r" (e.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original.as_storage;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union e = { expected };
+ storage_union d = { desired };
+ bool success;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "cmp %x[original_0], %x[expected_0]\n\t"\
+ "ccmp %x[original_1], %x[expected_1], #0, eq\n\t"\
+ "b.ne 2f\n\t"\
+ "st" st_mo "xp %w[success], %x[desired_0], %x[desired_1], %[storage]\n\t"\
+ "cbnz %w[success], 1b\n\t"\
+ "2:\n\t"\
+ "cset %w[success], eq\n\t"\
+ : [success] "=&r" (success), [storage] "+Q" (storage), [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u])\
+ : [desired_0] "r" (d.as_uint64[0u]), [desired_1] "r" (d.as_uint64[1u]), [expected_0] "r" (e.as_uint64[0u]), [expected_1] "r" (e.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(success_order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ expected = original.as_storage;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "adds %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[original_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "]\n\t"\
+ "adc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[original_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "r" (value.as_uint64[0u]), [value_1] "r" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "subs %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[original_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "]\n\t"\
+ "sbc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[original_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "r" (value.as_uint64[0u]), [value_1] "r" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "and %x[result_0], %x[original_0], %x[value_0]\n\t"\
+ "and %x[result_1], %x[original_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "orr %x[result_0], %x[original_0], %x[value_0]\n\t"\
+ "orr %x[result_1], %x[original_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union value = { v };
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "eor %x[result_0], %x[original_0], %x[value_0]\n\t"\
+ "eor %x[result_1], %x[original_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
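
The adds/adc and subs/sbc pairs in fetch_add and fetch_sub above perform 128-bit arithmetic on two 64-bit halves, propagating the carry (or borrow) from the low half into the high half. The equivalent computation in portable C++, as a sketch:

    #include <cstdint>

    struct u128_sketch { uint64_t lo, hi; };

    inline u128_sketch add128_sketch(u128_sketch a, u128_sketch b)
    {
        u128_sketch r;
        r.lo = a.lo + b.lo;                 // "adds": add and set the carry flag
        r.hi = a.hi + b.hi + (r.lo < a.lo); // "adc": add with the carry folded in
        return r;
    }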
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_alpha.hpp
index 85b1342982..9f29ab083e 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_alpha.hpp
@@ -8,20 +8,20 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_alpha.hpp
+ * \file atomic/detail/core_arch_ops_gcc_alpha.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains the implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -62,7 +62,7 @@ namespace detail {
as it apparently does not hurt either.
*/
-struct gcc_alpha_operations_base
+struct core_arch_operations_gcc_alpha_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -87,15 +87,16 @@ struct gcc_alpha_operations_base
};
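
The fence_before/fence_after helpers this base class provides lie in the part of the file this diff does not touch. Assuming they keep the upstream shape, they bracket each operation with the Alpha "mb" barrier: before release-class accesses and after acquire/consume loads. A sketch as free functions:

    #include <boost/memory_order.hpp>

    inline void alpha_fence_before_sketch(boost::memory_order order)
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(boost::memory_order_release)) != 0u)
            __asm__ __volatile__ ("mb" ::: "memory");
    }

    inline void alpha_fence_after_sketch(boost::memory_order order)
    {
        if ((static_cast< unsigned int >(order) &
             (static_cast< unsigned int >(boost::memory_order_consume) | static_cast< unsigned int >(boost::memory_order_acquire))) != 0u)
            __asm__ __volatile__ ("mb" ::: "memory");
    }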
-template< bool Signed >
-struct operations< 4u, Signed > :
- public gcc_alpha_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_alpha_base
{
- typedef typename make_storage_type< 4u >::type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -117,15 +118,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "mov %3, %1\n"
- "ldl_l %0, %2\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "mov %3, %1\n\t"
+ "ldl_l %0, %2\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (tmp) // %1
@@ -145,16 +146,16 @@ struct operations< 4u, Signed > :
storage_type current;
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %2, %4\n" // current = *(&storage)
- "cmpeq %2, %0, %3\n" // success = current == expected
- "mov %2, %0\n" // expected = current
- "beq %3, 2f\n" // if (success == 0) goto end
- "stl_c %1, %4\n" // storage = desired; desired = store succeeded
- "mov %1, %3\n" // success = desired
- "2:\n"
- : "+&r" (expected), // %0
- "+&r" (desired), // %1
+ "1:\n\t"
+ "ldl_l %2, %4\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %3\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "beq %3, 2f\n\t" // if (success == 0) goto end
+ "stl_c %1, %4\n\t" // storage = desired; desired = store succeeded
+ "mov %1, %3\n\t" // success = desired
+ "2:\n\t"
+ : "+r" (expected), // %0
+ "+r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
@@ -175,22 +176,22 @@ struct operations< 4u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "1:\n"
- "mov %5, %1\n" // tmp = desired
- "ldl_l %2, %4\n" // current = *(&storage)
- "cmpeq %2, %0, %3\n" // success = current == expected
- "mov %2, %0\n" // expected = current
- "beq %3, 2f\n" // if (success == 0) goto end
- "stl_c %1, %4\n" // storage = tmp; tmp = store succeeded
- "beq %1, 3f\n" // if (tmp == 0) goto retry
- "mov %1, %3\n" // success = tmp
- "2:\n"
-
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous\n"
-
- : "+&r" (expected), // %0
+ "1:\n\t"
+ "mov %5, %1\n\t" // tmp = desired
+ "ldl_l %2, %4\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %3\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "beq %3, 2f\n\t" // if (success == 0) goto end
+ "stl_c %1, %4\n\t" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n\t" // if (tmp == 0) goto retry
+ "mov %1, %3\n\t" // success = tmp
+ "2:\n\t"
+
+ ".subsection 2\n\t"
+ "3: br 1b\n\t"
+ ".previous\n\t"
+
+ : "+r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
@@ -211,15 +212,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "addl %0, %3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -237,15 +238,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "subl %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "subl %0, %3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -263,15 +264,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "and %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "and %0, %3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -289,15 +290,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "bis %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "bis %0, %3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -315,15 +316,15 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "xor %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "xor %0, %3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -347,29 +348,29 @@ struct operations< 4u, Signed > :
};
-template< >
-struct operations< 1u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 1u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "zapnot %1, #1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "addl %0, %3, %1\n\t"
+ "zapnot %1, 1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -377,26 +378,26 @@ struct operations< 1u, false > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "subl %0, %3, %1\n"
- "zapnot %1, #1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "subl %0, %3, %1\n\t"
+ "zapnot %1, 1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -404,34 +405,34 @@ struct operations< 1u, false > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 1u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 1u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "sextb %1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "addl %0, %3, %1\n\t"
+ "sextb %1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -439,26 +440,26 @@ struct operations< 1u, true > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "subl %0, %3, %1\n"
- "sextb %1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "subl %0, %3, %1\n\t"
+ "sextb %1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -466,35 +467,35 @@ struct operations< 1u, true > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 2u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 2u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "zapnot %1, #3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "addl %0, %3, %1\n\t"
+ "zapnot %1, 3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -502,26 +503,26 @@ struct operations< 2u, false > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "subl %0, %3, %1\n"
- "zapnot %1, #3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "subl %0, %3, %1\n\t"
+ "zapnot %1, 3, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -529,34 +530,34 @@ struct operations< 2u, false > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 2u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 2u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "sextw %1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "addl %0, %3, %1\n\t"
+ "sextw %1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -564,26 +565,26 @@ struct operations< 2u, true > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldl_l %0, %2\n"
- "subl %0, %3, %1\n"
- "sextw %1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldl_l %0, %2\n\t"
+ "subl %0, %3, %1\n\t"
+ "sextw %1, %1\n\t"
+ "stl_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -591,21 +592,22 @@ struct operations< 2u, true > :
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< bool Signed >
-struct operations< 8u, Signed > :
- public gcc_alpha_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_alpha_base
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -627,15 +629,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "mov %3, %1\n"
- "ldq_l %0, %2\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "mov %3, %1\n\t"
+ "ldq_l %0, %2\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (tmp) // %1
@@ -655,16 +657,16 @@ struct operations< 8u, Signed > :
storage_type current;
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %2, %4\n" // current = *(&storage)
- "cmpeq %2, %0, %3\n" // success = current == expected
- "mov %2, %0\n" // expected = current
- "beq %3, 2f\n" // if (success == 0) goto end
- "stq_c %1, %4\n" // storage = desired; desired = store succeeded
- "mov %1, %3\n" // success = desired
- "2:\n"
- : "+&r" (expected), // %0
- "+&r" (desired), // %1
+ "1:\n\t"
+ "ldq_l %2, %4\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %3\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "beq %3, 2f\n\t" // if (success == 0) goto end
+ "stq_c %1, %4\n\t" // storage = desired; desired = store succeeded
+ "mov %1, %3\n\t" // success = desired
+ "2:\n\t"
+ : "+r" (expected), // %0
+ "+r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
@@ -685,22 +687,22 @@ struct operations< 8u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "1:\n"
- "mov %5, %1\n" // tmp = desired
- "ldq_l %2, %4\n" // current = *(&storage)
- "cmpeq %2, %0, %3\n" // success = current == expected
- "mov %2, %0\n" // expected = current
- "beq %3, 2f\n" // if (success == 0) goto end
- "stq_c %1, %4\n" // storage = tmp; tmp = store succeeded
- "beq %1, 3f\n" // if (tmp == 0) goto retry
- "mov %1, %3\n" // success = tmp
- "2:\n"
-
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous\n"
-
- : "+&r" (expected), // %0
+ "1:\n\t"
+ "mov %5, %1\n\t" // tmp = desired
+ "ldq_l %2, %4\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %3\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "beq %3, 2f\n\t" // if (success == 0) goto end
+ "stq_c %1, %4\n\t" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n\t" // if (tmp == 0) goto retry
+ "mov %1, %3\n\t" // success = tmp
+ "2:\n\t"
+
+ ".subsection 2\n\t"
+ "3: br 1b\n\t"
+ ".previous\n\t"
+
+ : "+r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
@@ -721,15 +723,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %0, %2\n"
- "addq %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldq_l %0, %2\n\t"
+ "addq %0, %3, %1\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -747,15 +749,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %0, %2\n"
- "subq %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldq_l %0, %2\n\t"
+ "subq %0, %3, %1\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -773,15 +775,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %0, %2\n"
- "and %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldq_l %0, %2\n\t"
+ "and %0, %3, %1\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -799,15 +801,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %0, %2\n"
- "bis %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldq_l %0, %2\n\t"
+ "bis %0, %3, %1\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -825,15 +827,15 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldq_l %0, %2\n"
- "xor %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
+ "1:\n\t"
+ "ldq_l %0, %2\n\t"
+ "xor %0, %3, %1\n\t"
+ "stq_c %1, %2\n\t"
+ "beq %1, 2f\n\t"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
@@ -852,25 +854,14 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("mb" ::: "memory");
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_arm.hpp
index b32159536f..271a26a52a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_arm.hpp
@@ -5,26 +5,28 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_arm.hpp
+ * \file atomic/detail/core_arch_ops_gcc_arm.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains the implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/integral_extend.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/integral_conversions.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/gcc_arm_asm_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -53,18 +55,17 @@ namespace detail {
// LDREXH, LDREXB, STREXH and STREXB.
// There are also double-word versions, LDREXD and STREXD.
// (Actually it looks like these are available from version 6k onwards.)
-// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
-// I think you can supply an immediate offset to the address.
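
The 1-byte specialization further below computes in a 4-byte extended_storage_type because ldrexb/strexb move data through full 32-bit registers: values are zero- or sign-extended on load and narrowed back to the storage width on return. Semantically it amounts to the following sketch (illustrative only, not the header's asm path):

    #include <atomic>
    #include <cstdint>

    inline uint8_t fetch_add8_sketch(std::atomic<uint8_t>& storage, uint8_t v, std::memory_order order)
    {
        uint32_t wide = storage.fetch_add(v, order); // computed at register width
        return static_cast<uint8_t>(wide);           // narrowed to the storage width
    }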
-template< bool Signed >
-struct operations< 4u, Signed > :
- public gcc_arm_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_arm_base
{
- typedef typename make_storage_type< 4u >::type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -82,17 +83,17 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original;
fence_before(order);
+ storage_type original;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // load the original value
- "strex %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
- "teq %[tmp], #0\n" // check if store succeeded
- "bne 1b\n"
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // load the original value
+ "strex %[tmp], %[value], %[storage]\n\t" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n\t" // check if store succeeded
+ "bne 1b\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
: [value] "r" (v)
@@ -106,25 +107,28 @@ struct operations< 4u, Signed > :
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "cmp %[original], %[expected]\n" // flags = original==expected
- "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
- "strexeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
- "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "itt eq\n\t" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexeq %[success], %[desired], %[storage]\n\t" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n\t" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (expected), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (expected),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -132,35 +136,38 @@ struct operations< 4u, Signed > :
else
fence_after(failure_order);
expected = original;
- return !!success;
+ return success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "cmp %[original], %[expected]\n" // flags = original==expected
- "bne 2f\n" // if (!flags.equal) goto end
- "strex %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
- "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
- "beq 1b\n" // if (flags.equal) goto retry
- "2:\n"
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "bne 2f\n\t" // if (!flags.equal) goto end
+ "strex %[success], %[desired], %[storage]\n\t" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n\t" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n\t" // if (flags.equal) goto retry
+ "2:\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (expected), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (expected),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -168,7 +175,7 @@ struct operations< 4u, Signed > :
else
fence_after(failure_order);
expected = original;
- return !!success;
+ return success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -179,12 +186,12 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -205,12 +212,12 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -231,12 +238,12 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "and %[result], %[original], %[value]\n" // result = original & value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -257,12 +264,12 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -283,12 +290,12 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -308,22 +315,23 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
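
All fetch_<op> members of this struct share one retry shape: load-exclusive, compute, store-exclusive, branch back while the store fails. A compact portable rendering of that shape, assuming nothing beyond std::atomic (compare_exchange_weak stands in for strex here, since both may fail spuriously):

    #include <atomic>
    #include <cstdint>

    inline std::uint32_t fetch_add_sketch(std::atomic<std::uint32_t>& storage,
                                          std::uint32_t v)
    {
        std::uint32_t original = storage.load(std::memory_order_relaxed);
        // Recompute from the freshly observed value and re-attempt the store
        // until no other agent intervenes, mirroring the ldrex/strex loop.
        while (!storage.compare_exchange_weak(original, original + v)) {}
        return original; // fetch_add returns the pre-update value
    }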
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
-template< bool Signed >
-struct operations< 1u, Signed > :
- public gcc_arm_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_gcc_arm_base
{
- typedef typename make_storage_type< 1u >::type storage_type;
- typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u >::type extended_storage_type;
+ typedef typename storage_traits< 1u >::type storage_type;
+ typedef typename storage_traits< 4u >::type extended_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 1u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -341,17 +349,17 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- extended_storage_type original;
fence_before(order);
+ extended_storage_type original;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
- "strexb %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
- "teq %[tmp], #0\n" // check if store succeeded
- "bne 1b\n"
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // load the original value and zero-extend to 32 bits
+ "strexb %[tmp], %[value], %[storage]\n\t" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n\t" // check if store succeeded
+ "bne 1b\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
: [value] "r" (v)
@@ -365,25 +373,28 @@ struct operations< 1u, Signed > :
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
extended_storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "cmp %[original], %[expected]\n" // flags = original==expected
- "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
- "strexbeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
- "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "itt eq\n\t" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexbeq %[success], %[desired], %[storage]\n\t" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n\t" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -391,35 +402,38 @@ struct operations< 1u, Signed > :
else
fence_after(failure_order);
expected = static_cast< storage_type >(original);
- return !!success;
+ return success;
}
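
The zero_extend applied to 'expected' matters because ldrexb always yields a zero-extended 32-bit value, so the cmp must see the same zero-extended image even when storage_type is signed. Presumably the helper amounts to something like this sketch (the real implementation lives in a header this diff does not show):

    #include <cstdint>

    // E.g. an expected value of -1 must compare as 0x000000FF, not 0xFFFFFFFF,
    // because that is what ldrexb produces in the 32-bit register.
    inline std::uint32_t zero_extend_sketch(std::int8_t expected)
    {
        return static_cast<std::uint32_t>(static_cast<std::uint8_t>(expected));
    }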
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
extended_storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "cmp %[original], %[expected]\n" // flags = original==expected
- "bne 2f\n" // if (!flags.equal) goto end
- "strexb %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
- "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
- "beq 1b\n" // if (flags.equal) goto retry
- "2:\n"
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "bne 2f\n\t" // if (!flags.equal) goto end
+ "strexb %[success], %[desired], %[storage]\n\t" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n\t" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n\t" // if (flags.equal) goto retry
+ "2:\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -427,7 +441,7 @@ struct operations< 1u, Signed > :
else
fence_after(failure_order);
expected = static_cast< storage_type >(original);
- return !!success;
+ return success;
}
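
The weak/strong split mirrors the standard C++ contract: compare_exchange_weak makes a single ldrexb/strexb attempt and may fail spuriously, while the strong form loops internally until a failure is a genuine mismatch. A caller of the weak form supplies its own retry loop, typically along these lines (illustrative usage, hypothetical function name):

    #include <atomic>
    #include <cstdint>

    inline void saturating_inc_sketch(std::atomic<std::uint8_t>& counter)
    {
        std::uint8_t old = counter.load(std::memory_order_relaxed);
        // The loop absorbs spurious weak-CAS failures and exits early once
        // the counter is already saturated.
        while (old != 0xFFu &&
               !counter.compare_exchange_weak(old, static_cast<std::uint8_t>(old + 1u)))
        {
        }
    }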
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -438,12 +452,12 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "add %[result], %[original], %[value]\n" // result = original + value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -464,12 +478,12 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -490,12 +504,12 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "and %[result], %[original], %[value]\n" // result = original & value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -516,12 +530,12 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -542,12 +556,12 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -567,34 +581,34 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
-template< >
-struct operations< 1u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 1u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "uxtb %[result], %[result]\n\t" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -603,25 +617,25 @@ struct operations< 1u, false > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "uxtb %[result], %[result]\n\t" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -630,33 +644,33 @@ struct operations< 1u, false > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 1u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 1u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "sxtb %[result], %[result]\n\t" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -665,25 +679,25 @@ struct operations< 1u, true > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "sxtb %[result], %[result]\n\t" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -692,7 +706,7 @@ struct operations< 1u, true > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
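
In these fallbacks the byte lives in the 32-bit storage cell of the 4u base class, and the uxtb/sxtb step re-normalizes the arithmetic result to a valid zero- or sign-extended image before it is stored back. Roughly, for the signed case (a sketch under that reading; names invented):

    #include <atomic>
    #include <cstdint>

    inline std::uint32_t fetch_add_s8_on_u32_sketch(std::atomic<std::uint32_t>& storage,
                                                    std::uint32_t v)
    {
        std::uint32_t original = storage.load(std::memory_order_relaxed);
        std::uint32_t result;
        do
        {
            // sxtb equivalent: truncate to 8 bits, then sign-extend to 32 bits,
            // keeping the stored word a canonical image of the signed byte.
            result = static_cast<std::uint32_t>(static_cast<std::int32_t>(
                static_cast<std::int8_t>(original + v)));
        }
        while (!storage.compare_exchange_weak(original, result));
        return original;
    }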
@@ -701,16 +715,17 @@ struct operations< 1u, true > :
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
-template< bool Signed >
-struct operations< 2u, Signed > :
- public gcc_arm_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_gcc_arm_base
{
- typedef typename make_storage_type< 2u >::type storage_type;
- typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u >::type extended_storage_type;
+ typedef typename storage_traits< 2u >::type storage_type;
+ typedef typename storage_traits< 4u >::type extended_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 2u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -728,17 +743,17 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- extended_storage_type original;
fence_before(order);
+ extended_storage_type original;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
- "strexh %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
- "teq %[tmp], #0\n" // check if store succeeded
- "bne 1b\n"
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // load the original value and zero-extend to 32 bits
+ "strexh %[tmp], %[value], %[storage]\n\t" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n\t" // check if store succeeded
+ "bne 1b\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
: [value] "r" (v)
@@ -752,25 +767,28 @@ struct operations< 2u, Signed > :
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
extended_storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "cmp %[original], %[expected]\n" // flags = original==expected
- "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
- "strexheq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
- "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "itt eq\n\t" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexheq %[success], %[desired], %[storage]\n\t" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n\t" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -778,35 +796,38 @@ struct operations< 2u, Signed > :
else
fence_after(failure_order);
expected = static_cast< storage_type >(original);
- return !!success;
+ return success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
- uint32_t success;
+ bool success = false;
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
uint32_t tmp;
+#endif
extended_storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "mov %[success], #0\n" // success = 0
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "cmp %[original], %[expected]\n" // flags = original==expected
- "bne 2f\n" // if (!flags.equal) goto end
- "strexh %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
- "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
- "beq 1b\n" // if (flags.equal) goto retry
- "2:\n"
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n\t" // flags = original==expected
+ "bne 2f\n\t" // if (!flags.equal) goto end
+ "strexh %[success], %[desired], %[storage]\n\t" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n\t" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n\t" // if (flags.equal) goto retry
+ "2:\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
- : [original] "=&r" (original), // %0
- [success] "=&r" (success), // %1
- [tmp] "=&l" (tmp), // %2
- [storage] "+Q" (storage) // %3
- : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
- [desired] "r" (desired) // %5
+ : [original] "=&r" (original),
+ [success] "+r" (success),
+#if !defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ [tmp] "=&l" (tmp),
+#endif
+ [storage] "+Q" (storage)
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)),
+ [desired] "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -814,7 +835,7 @@ struct operations< 2u, Signed > :
else
fence_after(failure_order);
expected = static_cast< storage_type >(original);
- return !!success;
+ return success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -825,12 +846,12 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "add %[result], %[original], %[value]\n" // result = original + value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -851,12 +872,12 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -877,12 +898,12 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "and %[result], %[original], %[value]\n" // result = original & value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -903,12 +924,12 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -929,12 +950,12 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -954,34 +975,34 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
-template< >
-struct operations< 2u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 2u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "uxth %[result], %[result]\n\t" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -990,25 +1011,25 @@ struct operations< 2u, false > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "uxth %[result], %[result]\n\t" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -1017,33 +1038,33 @@ struct operations< 2u, false > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 2u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 2u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "sxth %[result], %[result]\n\t" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -1052,25 +1073,25 @@ struct operations< 2u, true > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fence_before(order);
+ base_type::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "sxth %[result], %[result]\n\t" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -1079,7 +1100,7 @@ struct operations< 2u, true > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
@@ -1099,15 +1120,16 @@ struct operations< 2u, true > :
// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. %H0).
// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/
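
Written out on its own, the pairing looks like the load below (ARM-specific, mirroring the constraints this file uses; the function name is illustrative):

    #include <cstdint>

    // %0 names the low register of the pair (Rt) and %H0 the high one (Rt2),
    // so one 64-bit C++ object feeds both register operands of ldrexd.
    inline std::uint64_t ldrexd_sketch(const volatile std::uint64_t& storage)
    {
        std::uint64_t value;
        __asm__ __volatile__
        (
            "ldrexd %0, %H0, %1\n\t"
            : "=&r" (value)
            : "Q" (storage)
        );
        return value;
    }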
-template< bool Signed >
-struct operations< 8u, Signed > :
- public gcc_arm_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_arm_base
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -1116,40 +1138,51 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ // ARMv7 says ldrex (and other load-exclusive instructions) can be used without a matching strex, see
+ // "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition", Section A3.4.5 "Load-Exclusive and Store-Exclusive usage restrictions".
storage_type original;
+#if defined(BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED)
+ __asm__ __volatile__
+ (
+ "ldrexd %0, %H0, %1\n\t"
+ : "=&r" (original) // %0
+ : "Q" (storage) // %1
+ );
+#else
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "ldrexd %1, %H1, [%2]\n"
+ "ldrexd %1, %H1, %2\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original) // %1
- : "r" (&storage) // %2
+ : "Q" (storage) // %2
);
+#endif
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original;
fence_before(order);
+ storage_type original;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // load the original value
- "strexd %0, %2, %H2, [%3]\n" // store the replacement, tmp = store failed
- "teq %0, #0\n" // check if store succeeded
- "bne 1b\n"
+ "1:\n\t"
+ "ldrexd %1, %H1, %2\n\t" // load the original value
+ "strexd %0, %3, %H3, %2\n\t" // store the replacement, tmp = store failed
+ "teq %0, #0\n\t" // check if store succeeded
+ "bne 1b\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
- "=&r" (original) // %1
- : "r" (v), // %2
- "r" (&storage) // %3
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (original), // %1
+ "+Q" (storage) // %2
+ : "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
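
Also visible in this hunk is the operand-constraint change: the old code passed the address ("r" (&storage)) and clobbered all of memory, while the new code binds the object itself with "Q"/"+Q", an ARM memory operand addressed through a single base register. That satisfies the addressing form ldrexd/strexd require and drops the blanket "memory" clobber, so the compiler may keep unrelated values in registers across the asm. A side-by-side sketch of the two styles (illustrative, using plain loads rather than exclusive ones):

    #include <cstdint>

    // Old style: pointer operand plus a blanket "memory" clobber.
    inline std::uint32_t load_old_style(const volatile std::uint32_t* p)
    {
        std::uint32_t v;
        __asm__ __volatile__ ("ldr %0, [%1]\n\t" : "=&r" (v) : "r" (p) : "memory");
        return v;
    }

    // New style: the object itself is the operand; "Q" prints as [reg].
    inline std::uint32_t load_new_style(const volatile std::uint32_t& storage)
    {
        std::uint32_t v;
        __asm__ __volatile__ ("ldr %0, %1\n\t" : "=&r" (v) : "Q" (storage));
        return v;
    }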
@@ -1159,74 +1192,72 @@ struct operations< 8u, Signed > :
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
+ storage_type original;
+ bool success = false;
uint32_t tmp;
- storage_type original, old_val = expected;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "cmp %1, %2\n" // flags = original.lo==old_val.lo
- "ittt eq\n" // [hint that the following 3 instructions are conditional on flags.equal]
- "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
- "strexdeq %0, %4, %H4, [%3]\n" // if (flags.equal) *(&storage) = desired, tmp = store failed
- "teqeq %0, #0\n" // if (flags.equal) flags = tmp==0
- "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
- "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
- "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "cmp %1, %4\n\t" // flags = original.lo==expected.lo
+ "it eq\n\t" // [hint that the following 1 instruction is conditional on flags.equal]
+ "cmpeq %H1, %H4\n\t" // if (flags.equal) flags = original.hi==expected.hi
+ "bne 1f\n\t"
+ "strexd %2, %5, %H5, %3\n\t" // *(&storage) = desired, success = store failed
+ "eor %2, %2, #1\n\t" // success ^= 1 (i.e. make it 1 if store succeeded)
+ "1:\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "+r" (old_val) // %2
- : "r" (&storage), // %3
- "r" (desired) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "+r" (success), // %2
+ "+Q" (storage) // %3
+ : "r" (expected), // %4
+ "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- const uint32_t success = (uint32_t)old_val;
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
- return !!success;
+ return success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
+ storage_type original;
+ bool success = false;
uint32_t tmp;
- storage_type original, old_val = expected;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "cmp %1, %2\n" // flags = original.lo==old_val.lo
- "it eq\n" // [hint that the following instruction is conditional on flags.equal]
- "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
- "bne 2f\n" // if (!flags.equal) goto end
- "strexd %0, %4, %H4, [%3]\n" // *(&storage) = desired, tmp = store failed
- "teq %0, #0\n" // flags.equal = tmp == 0
- "bne 1b\n" // if (flags.equal) goto retry
- "2:\n"
- "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
- "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
- "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "cmp %1, %4\n\t" // flags = original.lo==expected.lo
+ "it eq\n\t" // [hint that the following 1 instruction is conditional on flags.equal]
+ "cmpeq %H1, %H4\n\t" // if (flags.equal) flags = original.hi==expected.hi
+ "bne 2f\n\t"
+ "strexd %2, %5, %H5, %3\n\t" // *(&storage) = desired, success = store failed
+ "eors %2, %2, #1\n\t" // success ^= 1 (i.e. make it 1 if store succeeded), flags.equal = success == 0
+ "beq 1b\n\t" // if (flags.equal) goto retry
+ "2:\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "+r" (old_val) // %2
- : "r" (&storage), // %3
- "r" (desired) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "+r" (success), // %2
+ "+Q" (storage) // %3
+ : "r" (expected), // %4
+ "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- const uint32_t success = (uint32_t)old_val;
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
- return !!success;
+ return success;
}
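
The cmp/cmpeq pair above is a 32-bit core's spelling of 64-bit equality: compare the low words, and only when they match compare the high words. In plain C++ terms (illustrative helper):

    #include <cstdint>

    inline bool equal64_sketch(std::uint64_t a, std::uint64_t b)
    {
        const std::uint32_t a_lo = static_cast<std::uint32_t>(a);
        const std::uint32_t b_lo = static_cast<std::uint32_t>(b);
        if (a_lo != b_lo) // cmp
            return false;
        // cmpeq: the high words are compared only when the low words matched.
        return static_cast<std::uint32_t>(a >> 32) == static_cast<std::uint32_t>(b >> 32);
    }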
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -1237,20 +1268,20 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "adds %2, %1, %4\n" // result = original + value
- "adc %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "adds " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(4) "\n\t" // result = original + value
+ "adc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(4) "\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
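
The new BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO/_HI(n) helpers replace the literal %n/%Hn pairs in the carry-propagating adds/adc (and subs/sbc) sequences. A plausible reading, to be confirmed against the headers this diff omits: %n/%Hn select the lower/higher-numbered register of the pair, which holds the low/high word of the value only on little-endian targets, so arithmetic that must start at the low word needs an endian-aware selection. Hypothetically:

    // Hypothetical reconstruction -- the real definitions live elsewhere in
    // boost/atomic/detail and may differ in both names and conditions.
    #if !defined(__ARMEB__) // little-endian: low word in the first register
    #define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%" #arg
    #define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%H" #arg
    #else // big-endian: the pair is viewed the other way around
    #define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%H" #arg
    #define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%" #arg
    #endif

The bitwise and/orr/eor hunks below can keep the literal %2/%H2 forms because those operations act on each word independently, so the pairing order does not affect the result.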
@@ -1264,20 +1295,20 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "subs %2, %1, %4\n" // result = original - value
- "sbc %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "subs " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(4) "\n\t" // result = original - value
+ "sbc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(4) "\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
@@ -1291,20 +1322,20 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "and %2, %1, %4\n" // result = original & value
- "and %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "and %2, %1, %4\n\t" // result = original & value
+ "and %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
@@ -1318,20 +1349,20 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "orr %2, %1, %4\n" // result = original | value
- "orr %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "orr %2, %1, %4\n\t" // result = original | value
+ "orr %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
@@ -1345,20 +1376,20 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "eor %2, %1, %4\n" // result = original ^ value
- "eor %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "eor %2, %1, %4\n\t" // result = original ^ value
+ "eor %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
@@ -1371,27 +1402,16 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- gcc_arm_operations_base::hardware_full_fence();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp
index a826736d17..a35c4ad022 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp
@@ -8,21 +8,23 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_ppc.hpp
+ * \file atomic/detail/core_arch_ops_gcc_ppc.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/gcc_ppc_asm_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -79,15 +81,16 @@ namespace detail {
to pose a problem.
*/
-template< bool Signed >
-struct operations< 4u, Signed > :
- public gcc_ppc_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_ppc_base
{
- typedef typename make_storage_type< 4u >::type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -111,8 +114,8 @@ struct operations< 4u, Signed > :
(
"lwz %0, %1\n\t"
"cmpw %0, %0\n\t"
- "bne- 1f\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+4")
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"isync\n\t"
: "=&r" (v)
: "m" (storage)
@@ -137,10 +140,10 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y1\n\t"
"stwcx. %2,%y1\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-8")
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -159,11 +162,11 @@ struct operations< 4u, Signed > :
"li %1, 0\n\t"
"lwarx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stwcx. %4,%y2\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+8")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -183,13 +186,14 @@ struct operations< 4u, Signed > :
__asm__ __volatile__
(
"li %1, 0\n\t"
- "0: lwarx %0,%y2\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("0")
+ "lwarx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stwcx. %4,%y2\n\t"
- "bne- 0b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "0b", "-16")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -207,11 +211,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -226,11 +230,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -245,11 +249,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -264,11 +268,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -283,11 +287,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -303,21 +307,22 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
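
For readers tracing the asm above: lwarx/stwcx. form PowerPC's load-reserved/store-conditional pair, and every read-modify-write in this file is a retry loop around it. A minimal portable sketch of the exchange loop's semantics (illustrative only — function name and use of std::atomic are not the library's code):

    #include <atomic>
    #include <cstdint>

    // Semantic sketch of the lwarx/stwcx. exchange loop: reload and retry until
    // the store-conditional succeeds (compare_exchange_weak failing models a
    // lost reservation, i.e. the "bne- 1b" branch back to the label).
    inline std::uint32_t exchange_llsc_sketch(std::atomic<std::uint32_t>& storage, std::uint32_t v) noexcept
    {
        std::uint32_t original = storage.load(std::memory_order_relaxed);              // lwarx
        while (!storage.compare_exchange_weak(original, v, std::memory_order_relaxed)) // stwcx. + bne-
        {
        }
        return original; // value observed by the last successful lwarx
    }

The real implementation additionally brackets the loop with fence_before/fence_after to honor the requested memory_order.
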
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
-template< bool Signed >
-struct operations< 1u, Signed > :
- public gcc_ppc_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_gcc_ppc_base
{
- typedef typename make_storage_type< 1u >::type storage_type;
- typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 1u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 1u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -341,8 +346,8 @@ struct operations< 1u, Signed > :
(
"lbz %0, %1\n\t"
"cmpw %0, %0\n\t"
- "bne- 1f\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+4")
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"isync\n\t"
: "=&r" (v)
: "m" (storage)
@@ -367,10 +372,10 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y1\n\t"
"stbcx. %2,%y1\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-8")
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -389,11 +394,11 @@ struct operations< 1u, Signed > :
"li %1, 0\n\t"
"lbarx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stbcx. %4,%y2\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+8")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -413,13 +418,14 @@ struct operations< 1u, Signed > :
__asm__ __volatile__
(
"li %1, 0\n\t"
- "0: lbarx %0,%y2\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("0")
+ "lbarx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stbcx. %4,%y2\n\t"
- "bne- 0b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "0b", "-16")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -437,11 +443,11 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -456,11 +462,11 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -475,11 +481,11 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -494,11 +500,11 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -513,11 +519,11 @@ struct operations< 1u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -533,104 +539,104 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
-template< >
-struct operations< 1u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 1u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"rlwinm %1, %1, 0, 0xff\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"rlwinm %1, %1, 0, 0xff\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 1u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 1u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"extsb %1, %1\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"extsb %1, %1\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
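
When lbarx/stbcx. are unavailable, the byte operations above run the loop at 32-bit width and renormalize the result before the conditional store: rlwinm masks off all but the low byte for unsigned storage, extsb sign-extends for signed. A rough portable equivalent of the unsigned case (a sketch, assuming the sub-word value lives zero-extended in a 32-bit cell):

    #include <atomic>
    #include <cstdint>

    // Sketch of the widened fetch_add: arithmetic at word width, result narrowed
    // back to the byte range before the CAS (rlwinm; the signed variant would
    // sign-extend bit 7 instead, as extsb does).
    inline std::uint32_t fetch_add_widened_sketch(std::atomic<std::uint32_t>& cell, std::uint32_t v) noexcept
    {
        std::uint32_t original = cell.load(std::memory_order_relaxed);
        std::uint32_t result;
        do
        {
            result = (original + v) & 0xffu;
        }
        while (!cell.compare_exchange_weak(original, result, std::memory_order_relaxed));
        return original;
    }
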
@@ -639,15 +645,16 @@ struct operations< 1u, true > :
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
-template< bool Signed >
-struct operations< 2u, Signed > :
- public gcc_ppc_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_gcc_ppc_base
{
- typedef typename make_storage_type< 2u >::type storage_type;
- typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 2u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 2u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -671,8 +678,8 @@ struct operations< 2u, Signed > :
(
"lhz %0, %1\n\t"
"cmpw %0, %0\n\t"
- "bne- 1f\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+4")
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"isync\n\t"
: "=&r" (v)
: "m" (storage)
@@ -697,10 +704,10 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y1\n\t"
"sthcx. %2,%y1\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-8")
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -719,11 +726,11 @@ struct operations< 2u, Signed > :
"li %1, 0\n\t"
"lharx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"sthcx. %4,%y2\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+8")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -743,13 +750,14 @@ struct operations< 2u, Signed > :
__asm__ __volatile__
(
"li %1, 0\n\t"
- "0: lharx %0,%y2\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("0")
+ "lharx %0,%y2\n\t"
"cmpw %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"sthcx. %4,%y2\n\t"
- "bne- 0b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "0b", "-16")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -767,11 +775,11 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -786,11 +794,11 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -805,11 +813,11 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -824,11 +832,11 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -843,11 +851,11 @@ struct operations< 2u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -863,104 +871,104 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
-template< >
-struct operations< 2u, false > :
- public operations< 4u, false >
+template< bool Interprocess >
+struct core_arch_operations< 2u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
{
- typedef operations< 4u, false > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"rlwinm %1, %1, 0, 0xffff\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"rlwinm %1, %1, 0, 0xffff\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
-template< >
-struct operations< 2u, true > :
- public operations< 4u, true >
+template< bool Interprocess >
+struct core_arch_operations< 2u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
{
- typedef operations< 4u, true > base_type;
- typedef base_type::storage_type storage_type;
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"extsh %1, %1\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- fence_before(order);
+ base_type::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"extsh %1, %1\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-16")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- fence_after(order);
+ base_type::fence_after(order);
return original;
}
};
@@ -969,15 +977,16 @@ struct operations< 2u, true > :
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
-template< bool Signed >
-struct operations< 8u, Signed > :
- public gcc_ppc_operations_base
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_ppc_base
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -1001,8 +1010,8 @@ struct operations< 8u, Signed > :
(
"ld %0, %1\n\t"
"cmpd %0, %0\n\t"
- "bne- 1f\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+4")
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"isync\n\t"
: "=&b" (v)
: "m" (storage)
@@ -1027,10 +1036,10 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y1\n\t"
"stdcx. %2,%y1\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-8")
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -1049,11 +1058,11 @@ struct operations< 8u, Signed > :
"li %1, 0\n\t"
"ldarx %0,%y2\n\t"
"cmpd %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stdcx. %4,%y2\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+8")
"li %1, 1\n\t"
- "1:"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -1073,13 +1082,14 @@ struct operations< 8u, Signed > :
__asm__ __volatile__
(
"li %1, 0\n\t"
- "0: ldarx %0,%y2\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("0")
+ "ldarx %0,%y2\n\t"
"cmpd %0, %3\n\t"
- "bne- 1f\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1f", "+16")
"stdcx. %4,%y2\n\t"
- "bne- 0b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "0b", "-16")
"li %1, 1\n\t"
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -1097,11 +1107,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -1116,11 +1126,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -1135,11 +1145,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -1154,11 +1164,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -1173,11 +1183,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -1193,40 +1203,16 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- store(storage, 0, order);
+ store(storage, (storage_type)0, order);
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
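
A note on the two CAS flavours implemented above: the only difference is where a failed stwcx./stdcx. branches. The weak form jumps forward and reports failure even when only the reservation was lost; the strong form loops back to the reload, so it fails only on a genuine value mismatch. In portable terms (a semantic sketch, not library code):

    #include <atomic>
    #include <cstdint>

    // Strong CAS expressed through weak CAS: spurious failures (observed value
    // still matches what the caller expected) are retried, real mismatches are not.
    inline bool cas_strong_from_weak_sketch(std::atomic<std::uint64_t>& storage,
                                            std::uint64_t& expected, std::uint64_t desired) noexcept
    {
        const std::uint64_t wanted = expected;
        while (!storage.compare_exchange_weak(expected, desired))
        {
            if (expected != wanted)
                return false; // another thread changed the value
        }
        return true;
    }
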
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- {
-#if defined(__powerpc64__) || defined(__PPC64__)
- if (order != memory_order_seq_cst)
- __asm__ __volatile__ ("lwsync" ::: "memory");
- else
- __asm__ __volatile__ ("sync" ::: "memory");
-#else
- __asm__ __volatile__ ("sync" ::: "memory");
-#endif
- }
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
-#if defined(__ibmxl__) || defined(__IBMCPP__)
- __fence();
-#else
- __asm__ __volatile__ ("" ::: "memory");
-#endif
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
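
The plain loads in this file (lwz/lbz/lhz/ld followed by cmpw/cmpd, an always-untaken bne-, and isync) use the classic PowerPC control-dependency idiom to obtain load-acquire ordering without a full lwsync. Semantically it amounts to (illustrative sketch only):

    #include <atomic>
    #include <cstdint>

    // What the "load; cmp; bne- 1f; 1: isync" sequence provides: an acquire load.
    inline std::uint32_t load_acquire_sketch(const std::atomic<std::uint32_t>& storage) noexcept
    {
        std::uint32_t v = storage.load(std::memory_order_relaxed); // lwz
        std::atomic_thread_fence(std::memory_order_acquire);       // cmpw/bne-/isync stand-in
        return v;
    }
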
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_sparc.hpp
index 19b9b1fa87..aad7ec7e5e 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_sparc.hpp
@@ -8,22 +8,23 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_sparc.hpp
+ * \file atomic/detail/core_arch_ops_gcc_sparc.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
-#include <boost/atomic/detail/ops_cas_based.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/core_ops_cas_based.hpp>
+#include <boost/atomic/detail/cas_based_exchange.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -61,15 +62,16 @@ struct gcc_sparc_cas_base
}
};
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct gcc_sparc_cas32 :
public gcc_sparc_cas_base
{
- typedef typename make_storage_type< 4u >::type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -127,33 +129,34 @@ struct gcc_sparc_cas32 :
}
};
-template< bool Signed >
-struct operations< 4u, Signed > :
- public cas_based_operations< gcc_sparc_cas32< Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_operations_cas_based< gcc_sparc_cas32< Signed, Interprocess > >
{
};
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct gcc_sparc_cas64 :
public gcc_sparc_cas_base
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -197,44 +200,16 @@ struct gcc_sparc_cas64 :
}
};
-template< bool Signed >
-struct operations< 8u, Signed > :
- public cas_based_operations< cas_based_exchange< gcc_sparc_cas64< Signed > > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_operations_cas_based< cas_based_exchange< gcc_sparc_cas64< Signed, Interprocess > > >
{
};
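
SPARC's cas/casx instruction writes the previous memory contents into the destination register, so compare_exchange in gcc_sparc_cas32/cas64 reduces to one instruction plus an equality test, and exchange and the arithmetic ops are layered on top via core_operations_cas_based and cas_based_exchange. A sketch of that reduction, with GCC's __sync builtin standing in for the cas instruction (function name illustrative, not library API):

    #include <cstdint>

    // CAS built from an instruction that returns the previous value (cas/casx).
    inline bool cas_from_previous_value_sketch(volatile std::uint32_t* storage,
                                               std::uint32_t& expected, std::uint32_t desired) noexcept
    {
        std::uint32_t previous = __sync_val_compare_and_swap(storage, expected, desired);
        const bool success = (previous == expected);
        expected = previous; // report what was observed, as the asm versions do
        return success;
    }
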
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- switch (order)
- {
- case memory_order_release:
- __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
- break;
- case memory_order_consume:
- case memory_order_acquire:
- __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
- break;
- case memory_order_acq_rel:
- __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
- break;
- case memory_order_seq_cst:
- __asm__ __volatile__ ("membar #Sync" ::: "memory");
- break;
- case memory_order_relaxed:
- default:
- break;
- }
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_x86.hpp
index 4206bb39ef..7f41ff843d 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_gcc_x86.hpp
@@ -5,23 +5,30 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2014 - 2018 Andrey Semashev
+ * Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_x86_dcas.hpp
+ * \file atomic/detail/core_arch_ops_gcc_x86.hpp
*
- * This header contains implementation of the double-width CAS primitive for x86.
+ * This header contains implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
-#include <boost/cstdint.hpp>
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/intptr.hpp>
#include <boost/atomic/detail/string_ops.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/core_ops_cas_based.hpp>
+#endif
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -31,6 +38,370 @@ namespace boost {
namespace atomics {
namespace detail {
+struct core_arch_operations_gcc_x86_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
+struct core_arch_operations_gcc_x86 :
+ public core_arch_operations_gcc_x86_base
+{
+ typedef typename storage_traits< Size >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_seq_cst)
+ {
+ fence_before(order);
+ storage = v;
+ fence_after(order);
+ }
+ else
+ {
+ Derived::exchange(storage, v, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return Derived::fetch_add(storage, -v, order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
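
Because x86 is strongly ordered, fence_before/fence_after in the base class above are pure compiler barriers; the only store that needs machine-level fencing is seq_cst, which store() routes through Derived::exchange (a locked xchg). The same dispatch in portable form (a sketch, not the library's code):

    #include <atomic>
    #include <cstdint>

    // Plain store for anything weaker than seq_cst; exchange (xchg) otherwise.
    inline void store_dispatch_sketch(std::atomic<std::uint32_t>& storage,
                                      std::uint32_t v, std::memory_order order) noexcept
    {
        if (order != std::memory_order_seq_cst)
            storage.store(v, order);    // mov; only compiler reordering is suppressed
        else
            storage.exchange(v, order); // xchg: store plus full fence in one instruction
    }
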
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_gcc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
+{
+ typedef core_arch_operations_gcc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename storage_traits< 4u >::type temp_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%al, %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
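
x86 has no instruction that both applies and/or/xor and returns the old value, so BOOST_ATOMIC_DETAIL_CAS_LOOP above emulates fetch_and/fetch_or/fetch_xor with a lock cmpxchg retry loop that keeps the old value in the accumulator. Expanded by hand for the byte case, the loop computes (portable sketch; names illustrative):

    #include <atomic>
    #include <cstdint>

    // Semantic expansion of the CAS loop for fetch_and; fetch_or/fetch_xor are analogous.
    inline std::uint8_t fetch_and_cas_loop_sketch(std::atomic<std::uint8_t>& storage, std::uint8_t v) noexcept
    {
        std::uint8_t res = storage.load(std::memory_order_relaxed); // initial read
        std::uint8_t new_val;
        do
        {
            new_val = res & v;                               // "andb %%al, %b2" on a copy
        }
        while (!storage.compare_exchange_weak(res, new_val)); // "lock; cmpxchgb ...; jne 1b"
        return res; // old value, which cmpxchg leaves in the accumulator
    }
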
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_gcc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
+{
+ typedef core_arch_operations_gcc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename storage_traits< 4u >::type temp_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%ax, %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
+{
+ typedef core_arch_operations_gcc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[new_val]\n\t"\
+ op " %%eax, %[new_val]\n\t"\
+ "lock; cmpxchgl %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [arg] "ir" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
// Note: In the 32-bit PIC code guarded with BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX below we have to avoid using memory
// operand constraints because the compiler may choose to use ebx as the base register for that operand. At least, clang
// is known to do that. For this reason we have to pre-compute a pointer to storage and pass it in edi. For the same reason
@@ -39,21 +410,22 @@ namespace detail {
// The need to pass a pointer in edi is a bit wasteful because normally the memory operand would use a base pointer
// with an offset (e.g. `this` + offset). But unfortunately, there seems to be no way around it.
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
-
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct gcc_dcas_x86
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
typedef uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint32_t;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
- if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
+ if (BOOST_LIKELY((((uintptr_t)&storage) & 0x00000007) == 0u))
{
#if defined(__SSE__)
typedef float xmm_t __attribute__((__vector_size__(16)));
@@ -123,7 +495,7 @@ struct gcc_dcas_x86
{
storage_type value;
- if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
+ if (BOOST_LIKELY((((uintptr_t)&storage) & 0x00000007) == 0u))
{
#if defined(__SSE__)
typedef float xmm_t __attribute__((__vector_size__(16)));
@@ -375,17 +747,124 @@ struct gcc_dcas_x86
}
};
-#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_operations_cas_based< gcc_dcas_x86< Signed, Interprocess > >
+{
+};
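
The (uintptr_t)&storage & 0x00000007 tests above guard the fast path: on 32-bit x86 an 8-byte access is only single-copy atomic when the address is 8-byte aligned (handled with a 64-bit FP/SSE move), and misaligned storage falls back to lock cmpxchg8b. The predicate itself, spelled out (trivial sketch):

    #include <cstdint>

    // True: the single-instruction movq/SSE path applies; false: lock cmpxchg8b fallback.
    inline bool is_8byte_aligned(const volatile void* p) noexcept
    {
        return (reinterpret_cast<std::uintptr_t>(p) & 7u) == 0u;
    }
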
+
+#elif defined(__x86_64__)
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
+{
+ typedef core_arch_operations_gcc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movq %[arg], %[new_val]\n\t"\
+ op " %%rax, %[new_val]\n\t"\
+ "lock; cmpxchgq %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [arg] "r" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct gcc_dcas_x86_64
{
- typedef typename make_storage_type< 16u >::type storage_type;
- typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 16u >::type storage_type;
typedef uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint64_t;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -547,10 +1026,18 @@ struct gcc_dcas_x86_64
}
};
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 16u, Signed, Interprocess > :
+ public core_operations_cas_based< gcc_dcas_x86_64< Signed, Interprocess > >
+{
+};
+
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_arm.hpp
index 608c6fddf8..b8fe201b71 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_arm.hpp
@@ -8,29 +8,50 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_msvc_arm.hpp
+ * \file atomic/detail/core_arch_ops_msvc_arm.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
-#include <intrin.h>
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
-#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/fence_arch_operations.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
+extern "C" {
+__int8 __iso_volatile_load8(const volatile __int8*);
+__int16 __iso_volatile_load16(const volatile __int16*);
+__int32 __iso_volatile_load32(const volatile __int32*);
+__int64 __iso_volatile_load64(const volatile __int64*);
+void __iso_volatile_store8(volatile __int8*, __int8);
+void __iso_volatile_store16(volatile __int16*, __int16);
+void __iso_volatile_store32(volatile __int32*, __int32);
+void __iso_volatile_store64(volatile __int64*, __int64);
+}
+#if defined(BOOST_MSVC)
+#pragma intrinsic(__iso_volatile_load8)
+#pragma intrinsic(__iso_volatile_load16)
+#pragma intrinsic(__iso_volatile_load32)
+#pragma intrinsic(__iso_volatile_load64)
+#pragma intrinsic(__iso_volatile_store8)
+#pragma intrinsic(__iso_volatile_store16)
+#pragma intrinsic(__iso_volatile_store32)
+#pragma intrinsic(__iso_volatile_store64)
+#endif
+
#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
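
The __iso_volatile_* intrinsics declared above are plain volatile accesses with no implied ordering; the ARM code builds acquire/release semantics by pairing them with hardware_full_fence (a dmb ish) through the fence_before_store/fence_after helpers. Roughly, for a release store (a sketch in std::atomic terms, not the backend's actual code):

    #include <atomic>
    #include <cstdint>

    // Release store as composed by the MSVC ARM backend: fence, then a raw store.
    inline void arm_release_store_sketch(std::atomic<std::uint32_t>& storage, std::uint32_t v) noexcept
    {
        std::atomic_thread_fence(std::memory_order_release); // dmb ish via fence_before_store
        storage.store(v, std::memory_order_relaxed);         // __iso_volatile_store32
    }
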
@@ -52,22 +73,17 @@ namespace detail {
// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.
-struct msvc_arm_operations_base
+struct core_arch_operations_msvc_arm_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
- __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
- }
-
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
- hardware_full_fence();
+ fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
@@ -77,7 +93,7 @@ struct msvc_arm_operations_base
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
- hardware_full_fence();
+ fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
@@ -87,7 +103,7 @@ struct msvc_arm_operations_base
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
- hardware_full_fence();
+ fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
@@ -100,15 +116,16 @@ struct msvc_arm_operations_base
}
};
-template< std::size_t Size, bool Signed, typename Derived >
-struct msvc_arm_operations :
- public msvc_arm_operations_base
+template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
+struct core_arch_operations_msvc_arm :
+ public core_arch_operations_msvc_arm_base
{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+ typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -133,11 +150,11 @@ struct msvc_arm_operations :
}
};
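
The compare_exchange implementations below select an interlocked variant through base_type::cas_common_order, which folds the success and failure orders into one. Its body lies outside this hunk; a plausible simplification of the rule is sketched here (hypothetical helper, assuming Boost's flag-encoded memory_order values where consume = 1 and acquire = 2):

    #include <boost/memory_order.hpp>

    // Hypothetical sketch: take the union of both constraints and promote
    // consume to acquire, per the comment about the boost list thread above.
    inline boost::memory_order cas_common_order_sketch(boost::memory_order success_order,
                                                       boost::memory_order failure_order) noexcept
    {
        unsigned int bits = static_cast<unsigned int>(success_order) | static_cast<unsigned int>(failure_order);
        if ((bits & 1u) != 0u)        // memory_order_consume requested
            bits = (bits & ~1u) | 2u; // promote to memory_order_acquire
        return static_cast<boost::memory_order>(bits);
    }
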
-template< bool Signed >
-struct operations< 1u, Signed > :
- public msvc_arm_operations< 1u, Signed, operations< 1u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
- typedef msvc_arm_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -205,7 +222,7 @@ struct operations< 1u, Signed > :
{
storage_type previous = expected, old_val;
- switch (cas_common_order(success_order, failure_order))
+ switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
@@ -298,11 +315,11 @@ struct operations< 1u, Signed > :
}
};
-template< bool Signed >
-struct operations< 2u, Signed > :
- public msvc_arm_operations< 2u, Signed, operations< 2u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
- typedef msvc_arm_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -370,7 +387,7 @@ struct operations< 2u, Signed > :
{
storage_type previous = expected, old_val;
- switch (cas_common_order(success_order, failure_order))
+ switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
@@ -463,11 +480,11 @@ struct operations< 2u, Signed > :
}
};
-template< bool Signed >
-struct operations< 4u, Signed > :
- public msvc_arm_operations< 4u, Signed, operations< 4u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
- typedef msvc_arm_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -535,7 +552,7 @@ struct operations< 4u, Signed > :
{
storage_type previous = expected, old_val;
- switch (cas_common_order(success_order, failure_order))
+ switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
@@ -628,11 +645,11 @@ struct operations< 4u, Signed > :
}
};
-template< bool Signed >
-struct operations< 8u, Signed > :
- public msvc_arm_operations< 8u, Signed, operations< 8u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
- typedef msvc_arm_operations< 8u, Signed, operations< 8u, Signed > > base_type;
+ typedef core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -700,7 +717,7 @@ struct operations< 8u, Signed > :
{
storage_type previous = expected, old_val;
- switch (cas_common_order(success_order, failure_order))
+ switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
@@ -793,21 +810,6 @@ struct operations< 8u, Signed > :
}
};
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
- if (order != memory_order_relaxed)
- msvc_arm_operations_base::hardware_full_fence();
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
@@ -821,4 +823,6 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
-#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
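
Throughout the ARM hunks above, every compare-exchange funnels its success/failure orders through base_type::cas_common_order() and then dispatches to the _RELAXED/_ACQUIRE/_RELEASE flavor of the Interlocked intrinsic. The combiner's body is not part of this diff; the following is a minimal sketch of the idea, assuming Boost's bit-mask memory_order encoding (the same encoding the fence helpers later in this patch rely on when they test order & memory_order_release) — not the actual Boost implementation.

#include <boost/memory_order.hpp>

// Sketch only: combine the success and failure orders into a single order at
// least as strong as both. Assumes Boost's bit-mask memory_order values
// (relaxed=0, consume=1, acquire=2, release=4, acq_rel=6, seq_cst=14).
inline boost::memory_order cas_common_order_sketch(boost::memory_order success_order, boost::memory_order failure_order) noexcept
{
    unsigned int combined = static_cast< unsigned int >(success_order) | static_cast< unsigned int >(failure_order);
    if (combined & 1u) // promote consume to acquire so the result stays a valid enumerator
        combined = (combined & ~1u) | 2u;
    return static_cast< boost::memory_order >(combined);
}
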
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_x86.hpp
index 70b0ea994b..29c9afb19a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_arch_ops_msvc_x86.hpp
@@ -8,48 +8,39 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_msvc_x86.hpp
+ * \file atomic/detail/core_arch_ops_msvc_x86.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_arch_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
#include <cstddef>
+#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
#include <boost/atomic/detail/interlocked.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/cstdint.hpp>
-#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/cas_based_exchange.hpp>
+#include <boost/atomic/detail/core_ops_cas_based.hpp>
#endif
#include <boost/atomic/detail/ops_msvc_common.hpp>
#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#endif
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// frame pointer register 'ebx' modified by inline assembly code. See the note below.
-#pragma warning(disable: 4731)
-#endif
-
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
-extern "C" void _mm_mfence(void);
-#if defined(BOOST_MSVC)
-#pragma intrinsic(_mm_mfence)
-#endif
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
@@ -67,25 +58,15 @@ namespace detail {
* Either move the eight-byte aligned types out of the function, or avoid using EBX.
*
* Since we have no way of knowing that the compiler uses FPO, we have to always save and restore ebx
- * whenever we have to clobber it. Additionally, we disable warning C4731 above so that the compiler
+ * whenever we have to clobber it. Additionally, we disable warning C4731 in header.hpp so that the compiler
* doesn't spam about ebx use.
*/
-struct msvc_x86_operations_base
+struct core_arch_operations_msvc_x86_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
- _mm_mfence();
-#else
- long tmp;
- BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
-#endif
- }
-
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -108,15 +89,16 @@ struct msvc_x86_operations_base
}
};
-template< std::size_t Size, bool Signed, typename Derived >
-struct msvc_x86_operations :
- public msvc_x86_operations_base
+template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
+struct core_arch_operations_msvc_x86 :
+ public core_arch_operations_msvc_x86_base
{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+ typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -162,11 +144,11 @@ struct msvc_x86_operations :
}
};
-template< bool Signed >
-struct operations< 4u, Signed > :
- public msvc_x86_operations< 4u, Signed, operations< 4u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -233,11 +215,11 @@ struct operations< 4u, Signed > :
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
-template< bool Signed >
-struct operations< 1u, Signed > :
- public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -277,11 +259,11 @@ struct operations< 1u, Signed > :
#elif defined(_M_IX86)
-template< bool Signed >
-struct operations< 1u, Signed > :
- public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -398,9 +380,9 @@ struct operations< 1u, Signed > :
#else
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
@@ -408,11 +390,11 @@ struct operations< 1u, Signed > :
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
-template< bool Signed >
-struct operations< 2u, Signed > :
- public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -452,11 +434,11 @@ struct operations< 2u, Signed > :
#elif defined(_M_IX86)
-template< bool Signed >
-struct operations< 2u, Signed > :
- public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -573,9 +555,9 @@ struct operations< 2u, Signed > :
#else
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
@@ -584,16 +566,17 @@ struct operations< 2u, Signed > :
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct msvc_dcas_x86
{
- typedef typename make_storage_type< 8u >::type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 8u >::type storage_type;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
@@ -609,7 +592,7 @@ struct msvc_dcas_x86
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type volatile* p = &storage;
- if (((uint32_t)p & 0x00000007) == 0)
+ if (((uintptr_t)p & 0x00000007) == 0)
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
@@ -665,7 +648,7 @@ struct msvc_dcas_x86
storage_type const volatile* p = &storage;
storage_type value;
- if (((uint32_t)p & 0x00000007) == 0)
+ if (((uintptr_t)p & 0x00000007) == 0)
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
@@ -783,19 +766,19 @@ struct msvc_dcas_x86
}
};
-template< bool Signed >
-struct operations< 8u, Signed > :
- public cas_based_operations< msvc_dcas_x86< Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_operations_cas_based< msvc_dcas_x86< Signed, Interprocess > >
{
};
#elif defined(_M_AMD64)
-template< bool Signed >
-struct operations< 8u, Signed > :
- public msvc_x86_operations< 8u, Signed, operations< 8u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
- typedef msvc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
+ typedef core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -837,16 +820,17 @@ struct operations< 8u, Signed > :
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct msvc_dcas_x86_64
{
- typedef typename make_storage_type< 16u >::type storage_type;
- typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 16u >::type storage_type;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 16u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -875,34 +859,18 @@ struct msvc_dcas_x86_64
}
};
-template< bool Signed >
-struct operations< 16u, Signed > :
- public cas_based_operations< cas_based_exchange< msvc_dcas_x86_64< Signed > > >
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 16u, Signed, Interprocess > :
+ public core_operations_cas_based< cas_based_exchange< msvc_dcas_x86_64< Signed, Interprocess > > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
- if (order == memory_order_seq_cst)
- msvc_x86_operations_base::hardware_full_fence();
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
+#include <boost/atomic/detail/footer.hpp>
-#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
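
One behavioral fix worth highlighting in this file: the cmpxchg8b fast path used to test (uint32_t)p & 7, which truncates the pointer; the patch switches to uintptr_t so the alignment test is valid at any pointer width. The predicate, restated as a self-contained sketch:

#include <cstdint>

// The 8-byte SSE/FILD load and store paths are only atomic when the storage
// is 8-byte aligned (per the Intel SDM guarantee quoted in the source), so
// the code checks the low three address bits before taking them.
inline bool is_aligned_for_dcas(const volatile void* p) noexcept
{
    return (reinterpret_cast< std::uintptr_t >(p) & 0x7u) == 0u;
}
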
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations.hpp
new file mode 100644
index 0000000000..d4bd187224
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations.hpp
@@ -0,0 +1,49 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_operations.hpp
+ *
+ * This header defines core atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/core_arch_operations.hpp>
+#include <boost/atomic/detail/core_operations_fwd.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/core_ops_)
+#endif
+
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Default specialization that falls back to architecture-specific implementation
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_operations :
+ public core_arch_operations< Size, Signed, Interprocess >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
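
For orientation: core_operations is the layer the public front-ends resolve to, and unless a compiler backend (such as the gcc __atomic one added below) specializes it, it inherits core_arch_operations unchanged. A hedged sketch of how the template is parameterized follows — this is internal detail, not public API, and real code should go through boost::atomic<>.

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/core_operations.hpp>

// Illustration only: size in bytes, signedness, and the new Interprocess flag
// select the specialization; storage_type is the raw integral representation.
int demo()
{
    typedef boost::atomics::detail::core_operations< 4u, true, false > ops;
    ops::storage_type storage = 0;
    ops::store(storage, 1, boost::memory_order_release);
    return static_cast< int >(ops::fetch_add(storage, 41, boost::memory_order_acq_rel));
}
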
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated.hpp
new file mode 100644
index 0000000000..03af21e727
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated.hpp
@@ -0,0 +1,195 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_operations_emulated.hpp
+ *
+ * This header contains lock pool-based implementation of the core atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_operations_emulated_fwd.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< std::size_t Size, std::size_t Alignment, bool = Alignment >= storage_traits< Size >::native_alignment >
+struct core_operations_emulated_base
+{
+ typedef typename storage_traits< Size >::type storage_type;
+};
+
+template< std::size_t Size, std::size_t Alignment >
+struct core_operations_emulated_base< Size, Alignment, false >
+{
+ typedef buffer_storage< Size, Alignment > storage_type;
+};
+
+//! Emulated implementation of core atomic operations
+template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
+struct core_operations_emulated :
+ public core_operations_emulated_base< Size, Alignment >
+{
+ typedef core_operations_emulated_base< Size, Alignment > base_type;
+
+ // Define storage_type to have alignment not greater than Alignment. This will allow operations to work with value_types
+ // that possibly have weaker alignment requirements than storage_traits< Size >::type would. This is important for atomic_ref<>.
+ // atomic<> will allow a higher alignment requirement than its value_type.
+ // Note that storage_type should be an integral type, if possible, so that arithmetic and bitwise operations are possible.
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = Alignment >= storage_traits< Size >::alignment ? storage_traits< Size >::alignment : Alignment;
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = false;
+
+ typedef lock_pool::scoped_lock< storage_alignment > scoped_lock;
+
+ static void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ scoped_lock lock(&storage);
+ const_cast< storage_type& >(storage) = v;
+ }
+
+ static storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ scoped_lock lock(&storage);
+ return const_cast< storage_type const& >(storage);
+ }
+
+ static storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s += v;
+ return old_val;
+ }
+
+ static storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s -= v;
+ return old_val;
+ }
+
+ static storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s = v;
+ return old_val;
+ }
+
+ static bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ const bool res = old_val == expected;
+ if (res)
+ s = desired;
+ expected = old_val;
+
+ return res;
+ }
+
+ static bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ // Note: This function is an exact copy of compare_exchange_strong. The reason we're not just forwarding the call
+ // is that MSVC-12 ICEs in this case.
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ const bool res = old_val == expected;
+ if (res)
+ s = desired;
+ expected = old_val;
+
+ return res;
+ }
+
+ static storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s &= v;
+ return old_val;
+ }
+
+ static storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s |= v;
+ return old_val;
+ }
+
+ static storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type& s = const_cast< storage_type& >(storage);
+ scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s ^= v;
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ store(storage, (storage_type)0, order);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
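
The pattern above — map the object address to a pooled lock, take the lock, do a plain read-modify-write — is what every non-lock-free boost::atomic falls back to. A portable sketch of the same shape (Boost's lock_pool uses its own lock type and a wait/notify mechanism; std::mutex is only a stand-in here):

#include <mutex>
#include <cstdint>

// Hash the object address into a small table of locks and hold the matching
// lock around a plain read-modify-write, as core_operations_emulated does.
static std::mutex g_lock_pool[16];

inline std::mutex& lock_for(const volatile void* addr) noexcept
{
    return g_lock_pool[(reinterpret_cast< std::uintptr_t >(addr) >> 4) & 15u];
}

template< typename T >
T emulated_fetch_add(T volatile& storage, T v)
{
    std::lock_guard< std::mutex > guard(lock_for(&storage));
    T old_val = const_cast< T& >(storage); // plain access is safe under the lock
    const_cast< T& >(storage) = old_val + v;
    return old_val;
}
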
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated_fwd.hpp
new file mode 100644
index 0000000000..d3d9ccf862
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_emulated_fwd.hpp
@@ -0,0 +1,38 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_operations_emulated_fwd.hpp
+ *
+ * This header forward-declares lock pool-based implementation of the core atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
+struct core_operations_emulated;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_fwd.hpp
new file mode 100644
index 0000000000..9f2fffaa6d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_operations_fwd.hpp
@@ -0,0 +1,38 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_operations_fwd.hpp
+ *
+ * This header contains forward declaration of the \c core_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_cas_based.hpp
index e2e18aa384..0d617e1ea3 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_cas_based.hpp
@@ -6,17 +6,17 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_cas_based.hpp
+ * \file atomic/detail/core_ops_cas_based.hpp
*
- * This header contains CAS-based implementation of the \c operations template.
+ * This header contains CAS-based implementation of core atomic operations.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -27,22 +27,7 @@ namespace atomics {
namespace detail {
template< typename Base >
-struct cas_based_exchange :
- public Base
-{
- typedef typename Base::storage_type storage_type;
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- storage_type old_val;
- atomics::detail::non_atomic_load(storage, old_val);
- while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
- return old_val;
- }
-};
-
-template< typename Base >
-struct cas_based_operations :
+struct core_operations_cas_based :
public Base
{
typedef typename Base::storage_type storage_type;
@@ -104,4 +89,6 @@ struct cas_based_operations :
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
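
cas_based_exchange, shown as removed here, was split out into its own header (cas_based_exchange.hpp, per the include changes elsewhere in this patch) rather than deleted. Its loop is the canonical way to synthesize exchange() from compare-exchange; restated as a free-standing sketch with the orders simplified to seq_cst/relaxed (the original also uses a dedicated non_atomic_load helper for the initial read):

#include <boost/memory_order.hpp>

// Read an initial guess non-atomically, then retry a weak CAS until it
// installs the new value; on failure the CAS refreshes old_val with the value
// actually observed, so the loop converges without a separate load.
template< typename Ops >
typename Ops::storage_type exchange_via_cas(typename Ops::storage_type volatile& storage, typename Ops::storage_type v)
{
    typedef typename Ops::storage_type storage_type;
    storage_type old_val = const_cast< storage_type const& >(storage);
    while (!Ops::compare_exchange_weak(storage, old_val, v, boost::memory_order_seq_cst, boost::memory_order_relaxed)) {}
    return old_val;
}
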
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_atomic.hpp
new file mode 100644
index 0000000000..25b4eac402
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_atomic.hpp
@@ -0,0 +1,306 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_ops_gcc_atomic.hpp
+ *
+ * This header contains implementation of the \c core_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_operations_fwd.hpp>
+#include <boost/atomic/detail/core_arch_operations.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
+
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE || BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE ||\
+ BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE || BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
+// There are platforms where we need to use larger storage types
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
+#endif
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__INTEL_COMPILER)
+// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
+// for Intel Compiler.
+// In debug builds the compiler does not inline any functions, so basically
+// every atomic function call results in this warning. I don't know any other
+// way to selectively disable just this one warning.
+#pragma system_header
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_operations_gcc_atomic
+{
+ typedef typename storage_traits< Size >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+
+ // Note: In the current implementation, core_operations_gcc_atomic is only used when the __atomic
+ // intrinsics for the particular size are always lock-free (i.e. the corresponding LOCK_FREE macro is 2).
+ // Therefore it is safe to always set is_always_lock_free to true here.
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_GCC) && BOOST_GCC < 100100 && (defined(__x86_64__) || defined(__i386__))
+ // gcc up to 10.1 generates mov + mfence for seq_cst stores, which is slower than xchg
+ if (order != memory_order_seq_cst)
+ __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ else
+ __atomic_exchange_n(&storage, v, __ATOMIC_SEQ_CST);
+#else
+ __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
+ // At least gcc 9.3 and clang 10 do not generate relaxed ldapr instructions that are available in ARMv8.3-RCPC extension.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95751
+ typedef atomics::detail::core_arch_operations< storage_size, is_signed, is_interprocess > core_arch_operations;
+ return core_arch_operations::load(storage, order);
+#else
+ return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, false,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, true,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
+ }
+};
+
+// We want to only enable __atomic* intrinsics when the corresponding BOOST_ATOMIC_DETAIL_GCC_ATOMIC_*_LOCK_FREE macro indicates
+// the same or better lock-free guarantees as the BOOST_ATOMIC_*_LOCK_FREE macro. Otherwise, we want to leave core_operations
+// unspecialized, so that core_arch_operations is used instead.
+
+#if BOOST_ATOMIC_INT128_LOCK_FREE > 0 && BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 16u, Signed, Interprocess > :
+ public core_operations_gcc_atomic< 16u, Signed, Interprocess >
+{
+};
+
+#endif
+
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 8u, Signed, Interprocess > :
+ public core_operations_gcc_atomic< 8u, Signed, Interprocess >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 8u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 8u, Signed >
+{
+};
+
+#endif
+#endif // BOOST_ATOMIC_INT64_LOCK_FREE > 0
+
+
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
+ public core_operations_gcc_atomic< 4u, Signed, Interprocess >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 4u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 4u, Signed >
+{
+};
+
+#endif
+#endif // BOOST_ATOMIC_INT32_LOCK_FREE > 0
+
+
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public core_operations_gcc_atomic< 2u, Signed, Interprocess >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 2u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 2u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 2u, Signed >
+{
+};
+
+#endif
+#endif // BOOST_ATOMIC_INT16_LOCK_FREE > 0
+
+
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public core_operations_gcc_atomic< 1u, Signed, Interprocess >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 2u, Signed, Interprocess >, 1u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 1u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 1u, Signed >
+{
+};
+
+#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
+
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 1u, Signed >
+{
+};
+
+#endif
+#endif // BOOST_ATOMIC_INT8_LOCK_FREE > 0
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
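
convert_memory_order_to_gcc() comes from gcc_atomic_memory_order_utils.hpp and is not shown in this diff; the mapping it must perform is the obvious one onto the compiler's __ATOMIC_* constants. A sketch, assuming the standard GCC/Clang builtins — the actual Boost helper may differ in form but has to produce this mapping:

#include <boost/memory_order.hpp>

// Sketch of the order conversion assumed by the wrappers above.
inline int convert_memory_order_to_gcc_sketch(boost::memory_order order) noexcept
{
    switch (order)
    {
    case boost::memory_order_relaxed: return __ATOMIC_RELAXED;
    case boost::memory_order_consume: return __ATOMIC_CONSUME;
    case boost::memory_order_acquire: return __ATOMIC_ACQUIRE;
    case boost::memory_order_release: return __ATOMIC_RELEASE;
    case boost::memory_order_acq_rel: return __ATOMIC_ACQ_REL;
    default:                          return __ATOMIC_SEQ_CST;
    }
}
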
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_sync.hpp
index 1597de852a..34fd5eedef 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_gcc_sync.hpp
@@ -8,21 +8,23 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_gcc_sync.hpp
+ * \file atomic/detail/core_ops_gcc_sync.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_operations_fwd.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -32,7 +34,7 @@ namespace boost {
namespace atomics {
namespace detail {
-struct gcc_sync_operations_base
+struct core_operations_gcc_sync_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -56,30 +58,61 @@ struct gcc_sync_operations_base
}
};
-template< std::size_t Size, bool Signed >
-struct gcc_sync_operations :
- public gcc_sync_operations_base
+template< std::size_t Size, bool Signed, bool Interprocess >
+struct core_operations_gcc_sync :
+ public core_operations_gcc_sync_base
{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+ typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< storage_size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ // In general, we cannot guarantee atomicity of plain loads and stores of anything larger than a single byte on
+ // an arbitrary CPU architecture. However, all modern architectures seem to guarantee atomic loads and stores of
+ // suitably aligned objects of up to a pointer size. For larger objects we should probably use intrinsics to guarantee
+ // atomicity. If an architecture appears where this doesn't hold, this threshold needs to be updated (patches are welcome).
+ typedef atomics::detail::integral_constant< bool, storage_size <= sizeof(void*) > plain_stores_loads_are_atomic;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ store(storage, v, order, plain_stores_loads_are_atomic());
+ }
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
+ {
fence_before_store(order);
storage = v;
fence_after_store(order);
}
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::false_type) BOOST_NOEXCEPT
+ {
+ exchange(storage, v, order);
+ }
+
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ return load(storage, order, plain_stores_loads_are_atomic());
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
+ {
storage_type v = storage;
fence_after_load(order);
return v;
}
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order, atomics::detail::false_type) BOOST_NOEXCEPT
+ {
+ // Note: don't use fetch_add or other arithmetics here since storage_type may not be an arithmetic type.
+ storage_type expected = storage_type();
+ storage_type desired = expected;
+ // We don't care if CAS succeeds or not. If it does, it will just write the same value there was before.
+ return __sync_val_compare_and_swap(const_cast< storage_type volatile* >(&storage), expected, desired);
+ }
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_add(&storage, v);
@@ -154,87 +187,77 @@ struct gcc_sync_operations :
};
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
-template< bool Signed >
-struct operations< 1u, Signed > :
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
- public gcc_sync_operations< 1u, Signed >
+ public core_operations_gcc_sync< 1u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- public extending_cas_based_operations< gcc_sync_operations< 2u, Signed >, 1u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 2u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 1u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 1u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 1u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 1u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 1u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
-template< bool Signed >
-struct operations< 2u, Signed > :
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- public gcc_sync_operations< 2u, Signed >
+ public core_operations_gcc_sync< 2u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 2u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 2u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 2u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 2u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 2u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
-template< bool Signed >
-struct operations< 4u, Signed > :
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public gcc_sync_operations< 4u, Signed >
+ public core_operations_gcc_sync< 4u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 4u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 4u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 4u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 4u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
-template< bool Signed >
-struct operations< 8u, Signed > :
+template< bool Signed, bool Interprocess >
+struct core_operations< 8u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public gcc_sync_operations< 8u, Signed >
+ public core_operations_gcc_sync< 8u, Signed, Interprocess >
#else
- public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 8u, Signed >
+ public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 8u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
-template< bool Signed >
-struct operations< 16u, Signed > :
- public gcc_sync_operations< 16u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_operations< 16u, Signed, Interprocess > :
+ public core_operations_gcc_sync< 16u, Signed, Interprocess >
{
};
#endif
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __sync_synchronize();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
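
The new large-object load path above deserves a second look: when an object is wider than a pointer, a plain load is not guaranteed atomic, so the code performs a CAS of a default-constructed value against itself. __sync_val_compare_and_swap returns the value it observed whether or not the swap happened, which turns it into an atomic read. Distilled:

// Requires the GCC/Clang __sync builtins, as in the file above. The CAS only
// writes if the object happened to equal T(), and then it writes the same
// value back, so the operation is observably a pure load.
template< typename T >
T sync_load_via_cas(T volatile& storage)
{
    T expected = T();
    return __sync_val_compare_and_swap(&storage, expected, expected);
}
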
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_linux_arm.hpp
index 16af1732cf..f00cde2d20 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_linux_arm.hpp
@@ -10,22 +10,24 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_linux_arm.hpp
+ * \file atomic/detail/core_ops_linux_arm.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains implementation of the \c core_operations template.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
-#include <boost/atomic/detail/ops_cas_based.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_operations_fwd.hpp>
+#include <boost/atomic/detail/core_ops_cas_based.hpp>
+#include <boost/atomic/detail/cas_based_exchange.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
+#include <boost/atomic/detail/fence_operations.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -44,6 +46,8 @@ namespace detail {
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
+// https://lwn.net/Articles/314561/
+//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
@@ -64,37 +68,32 @@ struct linux_arm_cas_base
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
- hardware_full_fence();
+ fence_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
- hardware_full_fence();
+ fence_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
- hardware_full_fence();
- }
-
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
- typedef void (*kernel_dmb_t)(void);
- ((kernel_dmb_t)0xffff0fa0)();
+ fence_operations::hardware_full_fence();
}
};
-template< bool Signed >
+template< bool Signed, bool Interprocess >
struct linux_arm_cas :
public linux_arm_cas_base
{
- typedef typename make_storage_type< 4u >::type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+ typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -143,38 +142,28 @@ struct linux_arm_cas :
}
};
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 1u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 1u, Signed >
{
};
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 2u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 2u, Signed >
{
};
-template< bool Signed >
-struct operations< 4u, Signed > :
- public cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
+ public core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >
{
};
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- linux_arm_cas_base::hardware_full_fence();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
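A minimal sketch (not part of the patch) of how the fixed-address Linux kuser helpers referenced in the comment above are invoked. The 0xffff0fa0 barrier call appears verbatim in the removed hardware_full_fence; the cmpxchg helper address 0xffff0fc0 and its prototype follow the kernel's documented kuser ABI and should be read as assumptions here, not as code from this patch.

    typedef int (*kernel_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
    typedef void (*kernel_dmb_t)(void);

    // __kernel_cmpxchg resides at a fixed address in the kernel vector page
    // and returns 0 when *ptr was updated from oldval to newval.
    inline bool emulated_cas(volatile int* ptr, int expected, int desired)
    {
        return ((kernel_cmpxchg_t)0xffff0fc0)(expected, desired, ptr) == 0;
    }

    // __kernel_memory_barrier: the full fence the removed code invoked.
    inline void hardware_full_fence()
    {
        ((kernel_dmb_t)0xffff0fa0)();
    }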
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_windows.hpp
index d4ce6d95e7..346c33445f 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/core_ops_windows.hpp
@@ -8,9 +8,9 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_windows.hpp
+ * \file atomic/detail/core_ops_windows.hpp
*
- * This header contains implementation of the \c operations template.
+ * This header contains the implementation of the \c core_operations template.
*
* This implementation is the most basic version for Windows. It should
* work for any non-MSVC-like compilers as long as there are Interlocked WinAPI
@@ -20,19 +20,19 @@
* versions based on compiler intrinsics.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
-#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -42,17 +42,11 @@ namespace boost {
namespace atomics {
namespace detail {
-struct windows_operations_base
+struct core_operations_windows_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
- long tmp;
- BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
- }
-
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -64,15 +58,16 @@ struct windows_operations_base
}
};
-template< std::size_t Size, bool Signed, typename Derived >
-struct windows_operations :
- public windows_operations_base
+template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
+struct core_operations_windows :
+ public core_operations_windows_base
{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+ typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -107,11 +102,11 @@ struct windows_operations :
}
};
-template< bool Signed >
-struct operations< 4u, Signed > :
- public windows_operations< 4u, Signed, operations< 4u, Signed > >
+template< bool Signed, bool Interprocess >
+struct core_operations< 4u, Signed, Interprocess > :
+ public core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > >
{
- typedef windows_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
@@ -185,34 +180,22 @@ struct operations< 4u, Signed > :
}
};
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_operations< 1u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+template< bool Signed, bool Interprocess >
+struct core_operations< 2u, Signed, Interprocess > :
+ public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
- if (order == memory_order_seq_cst)
- windows_operations_base::hardware_full_fence();
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-}
-
} // namespace detail
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
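The Interlocked-based approach described in the header comment, as a minimal stand-alone sketch. It assumes only the documented WinAPI InterlockedExchange, which is presumably what BOOST_ATOMIC_INTERLOCKED_EXCHANGE expands to on this path; the dummy-exchange fence mirrors the removed hardware_full_fence.

    #include <windows.h>

    // InterlockedExchange acts as a full memory barrier on Windows targets.
    inline long exchange_seq_cst(long volatile* target, long value)
    {
        return InterlockedExchange(target, value);
    }

    // Exchanging a dummy location yields a full fence, exactly as the
    // removed windows_operations_base::hardware_full_fence did.
    inline void hardware_full_fence()
    {
        long tmp;
        InterlockedExchange(&tmp, 0);
    }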
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extending_cas_based_arithmetic.hpp
index 5f197cea48..7b8d4c30ef 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extending_cas_based_arithmetic.hpp
@@ -6,19 +6,20 @@
* Copyright (c) 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/ops_extending_cas_based.hpp
+ * \file atomic/detail/extending_cas_based_arithmetic.hpp
*
- * This header contains a boilerplate of the \c operations template implementation that requires sign/zero extension in arithmetic operations.
+ * This header contains boilerplate for core atomic operations that require sign/zero extension in arithmetic operations.
*/
-#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/integral_extend.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/integral_conversions.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -29,11 +30,11 @@ namespace atomics {
namespace detail {
template< typename Base, std::size_t Size, bool Signed >
-struct extending_cas_based_operations :
+struct extending_cas_based_arithmetic :
public Base
{
typedef typename Base::storage_type storage_type;
- typedef typename make_storage_type< Size >::type emulated_storage_type;
+ typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -66,4 +67,6 @@ struct extending_cas_based_operations :
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
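The sign/zero-extension trick is easiest to see outside the template machinery. A minimal sketch using std::atomic instead of the library's storage traits: a one-byte value lives in lock-free four-byte storage, and each arithmetic result is truncated to the emulated width and zero-extended back before the CAS so the unused high bits stay canonical between operations.

    #include <atomic>
    #include <cstdint>

    inline uint8_t fetch_add_u8(std::atomic<uint32_t>& storage, uint8_t v)
    {
        uint32_t old_wide = storage.load(std::memory_order_relaxed);
        uint32_t new_wide;
        do
        {
            // Compute in the emulated 8-bit width, then zero-extend
            // into the wide storage word.
            new_wide = static_cast<uint8_t>(static_cast<uint8_t>(old_wide) + v);
        }
        while (!storage.compare_exchange_weak(old_wide, new_wide,
                                              std::memory_order_seq_cst,
                                              std::memory_order_relaxed));
        return static_cast<uint8_t>(old_wide);
    }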
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_operations_fwd.hpp
index 79bca9d2cd..07ca1ac06d 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_operations_fwd.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_operations_fwd.hpp
@@ -16,6 +16,7 @@
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -25,11 +26,13 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base, typename Value, std::size_t Size, bool = Base::is_always_lock_free >
+template< typename Base, typename Value = typename Base::value_type, std::size_t Size = sizeof(typename Base::storage_type), bool = Base::is_always_lock_free >
struct extra_fp_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_emulated.hpp
index e04b2f50fb..df556758c5 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_emulated.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_emulated.hpp
@@ -15,11 +15,12 @@
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
#include <cstddef>
+#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
-#include <boost/atomic/detail/lockpool.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -29,49 +30,54 @@ namespace boost {
namespace atomics {
namespace detail {
-//! Generic implementation of extra floating point operations
+//! Emulated implementation of extra floating point operations
template< typename Base, typename Value, std::size_t Size >
-struct emulated_extra_fp_operations :
+struct extra_fp_operations_emulated :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
+ typedef typename base_type::scoped_lock scoped_lock;
- static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static value_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = -old_val;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return old_val;
}
- static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static value_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = -old_val;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return new_val;
}
- static BOOST_FORCEINLINE value_type add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ static value_type add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val + v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return new_val;
}
- static BOOST_FORCEINLINE value_type sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ static value_type sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val - v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
@@ -80,23 +86,26 @@ struct emulated_extra_fp_operations :
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_sub(storage, v, order);
}
};
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, false > :
- public emulated_extra_fp_operations< Base, Value, Size >
+ public extra_fp_operations_emulated< Base, Value, Size >
{
};
@@ -104,4 +113,6 @@ struct extra_fp_operations< Base, Value, Size, false > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
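The scoped_lock used throughout the emulated path comes from the lock pool that backs every non-lock-free atomic. A reduced sketch of that pattern follows; the pool size and address hash are illustrative assumptions, not the library's actual values.

    #include <mutex>
    #include <cstdint>

    static std::mutex g_lock_pool[64];

    // Hash the storage address into the pool so unrelated atomics rarely
    // contend on the same mutex.
    inline std::mutex& lock_for(const volatile void* addr)
    {
        return g_lock_pool[(reinterpret_cast<std::uintptr_t>(addr) >> 4) % 64];
    }

    template< typename T >
    T locked_fetch_add(volatile T& storage, T v)
    {
        std::lock_guard<std::mutex> guard(lock_for(&storage));
        T old_val = const_cast<T&>(storage);
        const_cast<T&>(storage) = old_val + v;
        return old_val;
    }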
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_generic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_generic.hpp
index 34902c472c..aefd7d326a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_generic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_fp_ops_generic.hpp
@@ -18,16 +18,17 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/is_iec559.hpp>
#include <boost/atomic/detail/type_traits/is_integral.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 60000
+#if defined(BOOST_GCC) && BOOST_GCC >= 60000
#pragma GCC diagnostic push
// ignoring attributes on template argument X - this warning is because we need to pass storage_type as a template argument; no problem in this case
#pragma GCC diagnostic ignored "-Wignored-attributes"
@@ -46,7 +47,7 @@ template<
, bool = atomics::detail::is_iec559< Value >::value && atomics::detail::is_integral< typename Base::storage_type >::value
#endif
>
-struct generic_extra_fp_negate :
+struct extra_fp_negate_generic :
public Base
{
typedef Base base_type;
@@ -93,7 +94,7 @@ struct generic_extra_fp_negate :
//! Negate implementation for IEEE 754 / IEC 559 floating point types. We leverage the fact that the sign bit is the most significant bit in the value.
template< typename Base, typename Value, std::size_t Size >
-struct generic_extra_fp_negate< Base, Value, Size, true > :
+struct extra_fp_negate_generic< Base, Value, Size, true > :
public Base
{
typedef Base base_type;
@@ -101,7 +102,7 @@ struct generic_extra_fp_negate< Base, Value, Size, true > :
typedef Value value_type;
//! The mask with only one sign bit set to 1
- static BOOST_CONSTEXPR_OR_CONST storage_type sign_mask = static_cast< storage_type >(1u) << (atomics::detail::value_sizeof< value_type >::value * 8u - 1u);
+ static BOOST_CONSTEXPR_OR_CONST storage_type sign_mask = static_cast< storage_type >(1u) << (atomics::detail::value_size_of< value_type >::value * 8u - 1u);
static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
@@ -123,10 +124,10 @@ struct generic_extra_fp_negate< Base, Value, Size, true > :
//! Generic implementation of floating point operations
template< typename Base, typename Value, std::size_t Size >
-struct generic_extra_fp_operations :
- public generic_extra_fp_negate< Base, Value, Size >
+struct extra_fp_operations_generic :
+ public extra_fp_negate_generic< Base, Value, Size >
{
- typedef generic_extra_fp_negate< Base, Value, Size > base_type;
+ typedef extra_fp_negate_generic< Base, Value, Size > base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
@@ -174,7 +175,7 @@ struct generic_extra_fp_operations :
// Default extra_fp_operations template definition will be used unless specialized for a specific platform
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, true > :
- public generic_extra_fp_operations< Base, Value, Size >
+ public extra_fp_operations_generic< Base, Value, Size >
{
};
@@ -182,8 +183,10 @@ struct extra_fp_operations< Base, Value, Size, true > :
} // namespace atomics
} // namespace boost
-#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 60000
+#if defined(BOOST_GCC) && BOOST_GCC >= 60000
#pragma GCC diagnostic pop
#endif
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
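The IEC 559 specialization above reduces floating-point negation to an atomic XOR of the sign bit in the integral storage. A stand-alone sketch (for a 32-bit float over std::atomic, rather than the library's storage_traits machinery):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    inline float fetch_negate(std::atomic<uint32_t>& storage)
    {
        const uint32_t sign_mask = uint32_t(1) << 31;  // MSB is the sign bit
        uint32_t old_bits = storage.fetch_xor(sign_mask, std::memory_order_seq_cst);
        float old_val;
        std::memcpy(&old_val, &old_bits, sizeof(old_val));  // bitwise cast back
        return old_val;
    }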
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_operations_fwd.hpp
index 399a823351..15be025ce3 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_operations_fwd.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_operations_fwd.hpp
@@ -16,6 +16,7 @@
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -25,11 +26,13 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base, std::size_t Size, bool Signed, bool = Base::is_always_lock_free >
+template< typename Base, std::size_t Size = sizeof(typename Base::storage_type), bool Signed = Base::is_signed, bool = Base::is_always_lock_free >
struct extra_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_emulated.hpp
index c0e4832944..1de9a66fbb 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_emulated.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_emulated.hpp
@@ -15,115 +15,120 @@
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
#include <cstddef>
+#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
-#include <boost/atomic/detail/lockpool.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// unary minus operator applied to unsigned type, result still unsigned
-#pragma warning(disable: 4146)
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
-//! Generic implementation of extra operations
+//! Emulated implementation of extra operations
template< typename Base, std::size_t Size, bool Signed >
-struct emulated_extra_operations :
+struct extra_operations_emulated :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::scoped_lock scoped_lock;
- static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type old_val = s;
s = static_cast< storage_type >(-old_val);
return old_val;
}
- static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = static_cast< storage_type >(-s);
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ static storage_type add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = s;
new_val += v;
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ static storage_type sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = s;
new_val -= v;
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ static storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = s;
new_val &= v;
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ static storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = s;
new_val |= v;
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ static storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = s;
new_val ^= v;
s = new_val;
return new_val;
}
- static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type old_val = s;
s = static_cast< storage_type >(~old_val);
return old_val;
}
- static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ static storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
storage_type new_val = static_cast< storage_type >(~s);
s = new_val;
return new_val;
@@ -131,99 +136,116 @@ struct emulated_extra_operations :
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_add(storage, v, order);
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_sub(storage, v, order);
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ base_type::fetch_sub(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_and(storage, v, order);
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ base_type::fetch_and(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_or(storage, v, order);
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ base_type::fetch_or(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_xor(storage, v, order);
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ base_type::fetch_xor(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_complement(storage, order);
}
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!add(storage, v, order);
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!sub(storage, v, order);
}
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!negate(storage, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_xor(storage, v, order);
}
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_complement(storage, order);
}
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
- storage_type old_val = Base::fetch_or(storage, mask, order);
+ storage_type old_val = base_type::fetch_or(storage, mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
- storage_type old_val = Base::fetch_and(storage, ~mask, order);
+ storage_type old_val = base_type::fetch_and(storage, ~mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
- storage_type old_val = Base::fetch_xor(storage, mask, order);
+ storage_type old_val = base_type::fetch_xor(storage, mask, order);
return !!(old_val & mask);
}
};
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, false > :
- public emulated_extra_operations< Base, Size, Signed >
+ public extra_operations_emulated< Base, Size, Signed >
{
};
@@ -231,8 +253,6 @@ struct extra_operations< Base, Size, Signed, false > :
} // namespace atomics
} // namespace boost
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
+#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
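The bit_test_and_* members above all follow one pattern: apply fetch_or / fetch_and / fetch_xor with a single-bit mask and report the bit's previous state. A stand-alone sketch of the fetch_or variant, using std::atomic in place of the library's base_type:

    #include <atomic>
    #include <cstdint>

    inline bool bit_test_and_set(std::atomic<uint32_t>& storage, unsigned bit_number)
    {
        const uint32_t mask = uint32_t(1) << bit_number;
        const uint32_t old_val = storage.fetch_or(mask, std::memory_order_seq_cst);
        return (old_val & mask) != 0;  // was the bit already set?
    }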
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp
new file mode 100644
index 0000000000..7e9af37553
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp
@@ -0,0 +1,1060 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_aarch32.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for AArch32.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH32_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH32_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_aarch32_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct extra_operations_gcc_aarch32_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ // Note: For opaque operations prefer operations returning the resulting values instead of the original values
+ // as these operations require fewer registers.
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
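Every operation in the specializations below is emitted through a BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN macro that BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH instantiates once per memory order, picking the acquire/release flavour of the exclusive load/store pair. The dispatcher itself lives in ops_gcc_aarch32_common.hpp, which this diff does not show, so the mapping below is an assumed illustration expressed as a hypothetical helper:

    #include <boost/memory_order.hpp>

    // Which exclusive-access pair the dispatcher selects for a byte access;
    // halfword, word and doubleword accesses use the exh/ex/exd mnemonics.
    inline const char* exclusive_pair_for(boost::memory_order order)
    {
        switch (order)
        {
        case boost::memory_order_relaxed:
            return "ldrexb/strexb";
        case boost::memory_order_consume:
        case boost::memory_order_acquire:
            return "ldaexb/strexb";  // acquiring load-exclusive
        case boost::memory_order_release:
            return "ldrexb/stlexb";  // releasing store-exclusive
        default:
            return "ldaexb/stlexb";  // acq_rel and seq_cst
        }
    }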
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations_gcc_aarch32;
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch32< Base, 1u, Signed > :
+ public extra_operations_generic< Base, 1u, Signed >
+{
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "rsb %[result], %[original], #0\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "rsb %[result], #0\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "add %[result], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "sub %[result], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "and %[result], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "orr %[result], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "eor %[result], %[value]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[original], %[storage]\n\t"\
+ "mvn %[result], %[original]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exb %[result], %[storage]\n\t"\
+ "mvn %[result], %[result]\n\t"\
+ "st" st_mo "exb %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public extra_operations_gcc_aarch32_common< extra_operations_gcc_aarch32< Base, 1u, Signed > >
+{
+};
+
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch32< Base, 2u, Signed > :
+ public extra_operations_generic< Base, 2u, Signed >
+{
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "rsb %[result], %[original], #0\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "rsb %[result], #0\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "add %[result], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "sub %[result], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "and %[result], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "orr %[result], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "eor %[result], %[value]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[original], %[storage]\n\t"\
+ "mvn %[result], %[original]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exh %[result], %[storage]\n\t"\
+ "mvn %[result], %[result]\n\t"\
+ "st" st_mo "exh %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public extra_operations_gcc_aarch32_common< extra_operations_gcc_aarch32< Base, 2u, Signed > >
+{
+};
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch32< Base, 4u, Signed > :
+ public extra_operations_generic< Base, 4u, Signed >
+{
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "rsb %[result], %[original], #0\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "rsb %[result], #0\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "add %[result], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "sub %[result], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "and %[result], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "orr %[result], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "eor %[result], %[value]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : [value] "Ir" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[original], %[storage]\n\t"\
+ "mvn %[result], %[original]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [original] "=&r" (original), [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "ex %[result], %[storage]\n\t"\
+ "mvn %[result], %[result]\n\t"\
+ "st" st_mo "ex %[tmp], %[result], %[storage]\n\t"\
+ "teq %[tmp], #0\n\t"\
+ "bne 1b\n\t"\
+ : [result] "=&r" (result), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public extra_operations_gcc_aarch32_common< extra_operations_gcc_aarch32< Base, 4u, Signed > >
+{
+};
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch32< Base, 8u, Signed > :
+ public extra_operations_generic< Base, 8u, Signed >
+{
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
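+    // The 64-bit operations below work on a doubleword held in a register pair
+    // (ldrexd/strexd). In the asm templates %0 names the low register of operand 0 and
+    // %H0 its high counterpart; BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO/HI pick the half
+    // that is arithmetically least/most significant for the target endianness. Negation
+    // is two's complement spread across the pair: mvn both halves, then adds #1 into the
+    // low half and adc #0 to carry into the high half.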
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "mvn %3, %0\n\t"\
+ "mvn %H3, %H0\n\t"\
+ "adds " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(3) ", #1\n\t"\
+ "adc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(3) ", #0\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "mvn %0, %0\n\t"\
+ "mvn %H0, %H0\n\t"\
+ "adds " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(0) ", #1\n\t"\
+ "adc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(0) ", #0\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "adds " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(3) "\n\t"\
+ "adc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(3) "\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "subs " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(3) "\n\t"\
+ "sbc " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(0) ", " BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(3) "\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "and %0, %3\n\t"\
+ "and %H0, %H3\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "orr %0, %3\n\t"\
+ "orr %H0, %H3\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "eor %0, %3\n\t"\
+ "eor %H0, %H3\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : "r" (v)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "mvn %3, %0\n\t"\
+ "mvn %H3, %H0\n\t"\
+ "st" st_mo "exd %1, %3, %H3, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (original), "=&r" (tmp), "+Q" (storage), "=&r" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "exd %0, %H0, %2\n\t"\
+ "mvn %0, %0\n\t"\
+ "mvn %H0, %H0\n\t"\
+ "st" st_mo "exd %1, %0, %H0, %2\n\t"\
+ "teq %1, #0\n\t"\
+ "bne 1b\n\t"\
+ : "=&r" (result), "=&r" (tmp), "+Q" (storage)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
+
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public extra_operations_gcc_aarch32_common< extra_operations_gcc_aarch32< Base, 8u, Signed > >
+{
+};
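+
+// Illustrative usage (a sketch, not part of the upstream header): with these
+// specializations selected, the extension interface of boost::atomic maps onto
+// single ldrex/strex retry loops, e.g.:
+//
+//   boost::atomic< boost::uint32_t > flags(0u);
+//   flags.opaque_or(0x4u, boost::memory_order_relaxed);  // one orr inside the loop
+//   bool any_was_set = !!flags.fetch_complement(boost::memory_order_acq_rel);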
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH32_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp
new file mode 100644
index 0000000000..922d810804
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp
@@ -0,0 +1,1330 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_aarch64.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for AArch64.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH64_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_aarch64_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct extra_operations_gcc_aarch64_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+    // Note: For opaque operations, prefer the operations that return the resulting value
+    // over those returning the original value, as they require fewer registers. That is,
+    // unless LSE is available, in which case it is better to use the dedicated atomic
+    // instructions. The LSE check is done in the base_type, where needed (e.g. for
+    // 128-bit operations there are no LSE instructions).
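+    // For example, in the per-size implementations below, negate() cycles one result
+    // register through the ld*xr/neg/st*xr sequence, while fetch_negate() must keep
+    // both the original value and its negation live until the store-exclusive succeeds.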
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations_gcc_aarch64;
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch64< Base, 1u, Signed > :
+ public extra_operations_generic< Base, 1u, Signed >
+{
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
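+    // Each operation below is an exclusive load/store retry loop: ld{a}xrb loads the
+    // byte and arms the exclusive monitor, st{l}xrb attempts the store and writes a
+    // non-zero status to tmp if the monitor was lost, and cbnz retries in that case.
+    // BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH dispatches on the memory order, expanding
+    // the *_MO_INSN template with the matching acquire ("a") / release ("l") suffixes.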
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "neg %w[result], %w[original]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "neg %w[result], %w[result]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#if !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+
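+    // Without LSE the remaining operations are ldxr/stxr loops as well. The operand
+    // constraints allow small constants to be encoded directly in the instruction:
+    // "I" accepts an add/sub immediate, "K" and "L" accept 32- and 64-bit logical
+    // immediates for and/orr/eor, and "r" is the register fallback.
+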
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "add %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "sub %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "and %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "orr %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "eor %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[original], %[storage]\n\t"\
+ "mvn %w[result], %w[original]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrb %w[result], %[storage]\n\t"\
+ "mvn %w[result], %w[result]\n\t"\
+ "st" st_mo "xrb %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public extra_operations_gcc_aarch64_common< extra_operations_gcc_aarch64< Base, 1u, Signed > >
+{
+};
+
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch64< Base, 2u, Signed > :
+ public extra_operations_generic< Base, 2u, Signed >
+{
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "neg %w[result], %w[original]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "neg %w[result], %w[result]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#if !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "add %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "sub %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "and %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "orr %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "eor %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[original], %[storage]\n\t"\
+ "mvn %w[result], %w[original]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xrh %w[result], %[storage]\n\t"\
+ "mvn %w[result], %w[result]\n\t"\
+ "st" st_mo "xrh %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public extra_operations_gcc_aarch64_common< extra_operations_gcc_aarch64< Base, 2u, Signed > >
+{
+};
+
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch64< Base, 4u, Signed > :
+ public extra_operations_generic< Base, 4u, Signed >
+{
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "neg %w[result], %w[original]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "neg %w[result], %w[result]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#if !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "add %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "sub %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "and %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "orr %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "eor %w[result], %w[result], %w[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Kr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[original], %[storage]\n\t"\
+ "mvn %w[result], %w[original]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %w[result], %[storage]\n\t"\
+ "mvn %w[result], %w[result]\n\t"\
+ "st" st_mo "xr %w[tmp], %w[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public extra_operations_gcc_aarch64_common< extra_operations_gcc_aarch64< Base, 4u, Signed > >
+{
+};
+
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch64< Base, 8u, Signed > :
+ public extra_operations_generic< Base, 8u, Signed >
+{
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "neg %x[result], %x[original]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "neg %x[result], %x[result]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#if !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "add %x[result], %x[result], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "sub %x[result], %x[result], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Ir" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "and %x[result], %x[result], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "orr %x[result], %x[result], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "eor %x[result], %x[result], %x[value]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : [value] "Lr" (v)\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[original], %[storage]\n\t"\
+ "mvn %x[result], %x[original]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [result] "=&r" (result), [storage] "+Q" (storage), [original] "=&r" (original)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xr %x[result], %[storage]\n\t"\
+ "mvn %x[result], %x[result]\n\t"\
+ "st" st_mo "xr %w[tmp], %x[result], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage), [result] "=&r" (result)\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result;
+ }
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public extra_operations_gcc_aarch64_common< extra_operations_gcc_aarch64< Base, 8u, Signed > >
+{
+};
+
+
+template< typename Base, bool Signed >
+struct extra_operations_gcc_aarch64< Base, 16u, Signed > :
+ public extra_operations_generic< Base, 16u, Signed >
+{
+ typedef extra_operations_generic< Base, 16u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename base_type::storage_union storage_union;
+
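+    // There is no 128-bit general-purpose register, so the operations below view the
+    // storage through storage_union as two 64-bit halves (as_uint64[0]/as_uint64[1]) and
+    // access it with the ldxp/stxp pair instructions. BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO/HI
+    // name the arithmetically least/most significant half for the target endianness,
+    // which is what keeps the adds/adc and subs/sbc carry chains correct on both byte orders.
+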
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "mvn %x[result_0], %x[original_0]\n\t"\
+ "mvn %x[result_1], %x[original_1]\n\t"\
+ "adds %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], #1\n\t"\
+ "adc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], xzr\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "mvn %x[result_0], %x[result_0]\n\t"\
+ "mvn %x[result_1], %x[result_1]\n\t"\
+ "adds %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], #1\n\t"\
+ "adc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], xzr\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "adds %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "]\n\t"\
+ "adc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "r" (value.as_uint64[0u]), [value_1] "r" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "subs %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "]\n\t"\
+ "sbc %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[result_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "], %x[value_" BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "r" (value.as_uint64[0u]), [value_1] "r" (value.as_uint64[1u])\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "and %x[result_0], %x[result_0], %x[value_0]\n\t"\
+ "and %x[result_1], %x[result_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "orr %x[result_0], %x[result_0], %x[value_0]\n\t"\
+ "orr %x[result_1], %x[result_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ storage_union value = { v };
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "eor %x[result_0], %x[result_0], %x[value_0]\n\t"\
+ "eor %x[result_1], %x[result_1], %x[value_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : [value_0] "Lr" (value.as_uint64[0u]), [value_1] "Lr" (value.as_uint64[1u])\
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union original;
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[original_0], %x[original_1], %[storage]\n\t"\
+ "mvn %x[result_0], %x[original_0]\n\t"\
+ "mvn %x[result_1], %x[original_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [original_0] "=&r" (original.as_uint64[0u]), [original_1] "=&r" (original.as_uint64[1u]),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return original.as_storage;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_union result;
+ uint32_t tmp;
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN(ld_mo, st_mo)\
+ __asm__ __volatile__\
+ (\
+ "1:\n\t"\
+ "ld" ld_mo "xp %x[result_0], %x[result_1], %[storage]\n\t"\
+ "mvn %x[result_0], %x[result_0]\n\t"\
+ "mvn %x[result_1], %x[result_1]\n\t"\
+ "st" st_mo "xp %w[tmp], %x[result_0], %x[result_1], %[storage]\n\t"\
+ "cbnz %w[tmp], 1b\n\t"\
+ : [tmp] "=&r" (tmp), [storage] "+Q" (storage),\
+ [result_0] "=&r" (result.as_uint64[0u]), [result_1] "=&r" (result.as_uint64[1u])\
+ : \
+ : "memory"\
+ );
+
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(order)
+#undef BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN
+
+ return result.as_storage;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 16u, Signed, true > :
+ public extra_operations_gcc_aarch64_common< extra_operations_gcc_aarch64< Base, 16u, Signed > >
+{
+};
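+
+// Illustrative usage (a sketch, not part of the upstream header): through these
+// specializations the extension interface of boost::atomic compiles down to the loops
+// above, or to dedicated LSE instructions (e.g. stadd) where
+// BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE is defined:
+//
+//   boost::atomic< boost::uint64_t > counter(2u);
+//   counter.opaque_add(1u, boost::memory_order_relaxed);
+//   if (!counter.sub_and_test(1u, boost::memory_order_acq_rel))
+//   {
+//       // the counter dropped to zero
+//   }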
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_AARCH64_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp
index e84f1771da..3577b8af93 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp
@@ -19,11 +19,13 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/gcc_arm_asm_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -34,7 +36,7 @@ namespace atomics {
namespace detail {
template< typename Base >
-struct gcc_arm_extra_operations_common :
+struct extra_operations_gcc_arm_common :
public Base
{
typedef Base base_type;
@@ -87,32 +89,32 @@ struct gcc_arm_extra_operations_common :
};
template< typename Base, std::size_t Size, bool Signed >
-struct gcc_arm_extra_operations;
+struct extra_operations_gcc_arm;
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
template< typename Base, bool Signed >
-struct gcc_arm_extra_operations< Base, 1u, Signed > :
- public generic_extra_operations< Base, 1u, Signed >
+struct extra_operations_gcc_arm< Base, 1u, Signed > :
+ public extra_operations_generic< Base, 1u, Signed >
{
- typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u >::type extended_storage_type;
+ typedef typename storage_traits< 4u >::type extended_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -121,24 +123,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(original);
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -147,24 +149,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "add %[result], %[original], %[value]\n" // result = original + value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -173,24 +175,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -199,24 +201,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "and %[result], %[original], %[value]\n" // result = original & value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -225,24 +227,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -251,24 +253,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -277,24 +279,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "mvn %[result], %[original]\n" // result = NOT original
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -303,24 +305,24 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(original);
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "mvn %[result], %[original]\n" // result = NOT original
- "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexb %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -329,14 +331,14 @@ struct gcc_arm_extra_operations< Base, 1u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 1u, Signed, true > :
- public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 1u, Signed > >
+ public extra_operations_gcc_arm_common< extra_operations_gcc_arm< Base, 1u, Signed > >
{
};
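
All hunks above follow one mechanical pattern: the operation templates are renamed (gcc_arm_extra_operations -> extra_operations_gcc_arm, generic_extra_operations -> extra_operations_generic, gcc_arm_operations_base -> core_arch_operations_gcc_arm_base, make_storage_type -> storage_traits), and each "\n" in the asm strings gains a trailing "\t" so every instruction is indented in the compiler's assembly listing. Neither change affects the emitted instructions; the byte-sized loop above still expands to roughly the following (illustrative only, assuming ARM mode on an LDREXB-capable core; BOOST_ATOMIC_DETAIL_ARM_ASM_START/END additionally handle switching out of Thumb-1 mode, where LDREX/STREX are unavailable):

    1:
            ldrexb  r0, [r2]        @ original = zero_extend(storage)
            rsb     r1, r0, #0      @ result = 0 - original
            strexb  r3, r1, [r2]    @ try to store result; r3 = 0 on success
            teq     r3, #0
            bne     1b              @ retry if the exclusive store failed
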
@@ -345,27 +347,27 @@ struct extra_operations< Base, 1u, Signed, true > :
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
template< typename Base, bool Signed >
-struct gcc_arm_extra_operations< Base, 2u, Signed > :
- public generic_extra_operations< Base, 2u, Signed >
+struct extra_operations_gcc_arm< Base, 2u, Signed > :
+ public extra_operations_generic< Base, 2u, Signed >
{
- typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u >::type extended_storage_type;
+ typedef typename storage_traits< 4u >::type extended_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -374,24 +376,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(original);
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -400,24 +402,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "add %[result], %[original], %[value]\n" // result = original + value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -426,24 +428,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -452,24 +454,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "and %[result], %[original], %[value]\n" // result = original & value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -478,24 +480,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -504,24 +506,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -530,24 +532,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "mvn %[result], %[original]\n" // result = NOT original
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -556,24 +558,24 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(original);
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
extended_storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
- "mvn %[result], %[original]\n" // result = NOT original
- "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexh %[original], %[storage]\n\t" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -582,40 +584,40 @@ struct gcc_arm_extra_operations< Base, 2u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return static_cast< storage_type >(result);
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 2u, Signed, true > :
- public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 2u, Signed > >
+ public extra_operations_gcc_arm_common< extra_operations_gcc_arm< Base, 2u, Signed > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
template< typename Base, bool Signed >
-struct gcc_arm_extra_operations< Base, 4u, Signed > :
- public generic_extra_operations< Base, 4u, Signed >
+struct extra_operations_gcc_arm< Base, 4u, Signed > :
+ public extra_operations_generic< Base, 4u, Signed >
{
- typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -624,24 +626,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "rsb %[result], %[original], #0\n" // result = 0 - original
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "rsb %[result], %[original], #0\n\t" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -650,24 +652,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "add %[result], %[original], %[value]\n" // result = original + value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "add %[result], %[original], %[value]\n\t" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -676,24 +678,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "sub %[result], %[original], %[value]\n" // result = original - value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n\t" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -702,24 +704,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "and %[result], %[original], %[value]\n" // result = original & value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "and %[result], %[original], %[value]\n\t" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -728,24 +730,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "orr %[result], %[original], %[value]\n" // result = original | value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n\t" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -754,24 +756,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "eor %[result], %[original], %[value]\n" // result = original ^ value
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n\t" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -780,24 +782,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
: [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "mvn %[result], %[original]\n" // result = NOT original
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -806,24 +808,24 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
- "1:\n"
- "ldrex %[original], %[storage]\n" // original = *(&storage)
- "mvn %[result], %[original]\n" // result = NOT original
- "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
- "teq %[tmp], #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrex %[original], %[storage]\n\t" // original = *(&storage)
+ "mvn %[result], %[original]\n\t" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n\t" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
@@ -832,273 +834,277 @@ struct gcc_arm_extra_operations< Base, 4u, Signed > :
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
- public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 4u, Signed > >
+ public extra_operations_gcc_arm_common< extra_operations_gcc_arm< Base, 4u, Signed > >
{
};
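
The 8-byte specializations below make two further changes beyond the renames. First, the doubleword halves in the adds/adc and subs/sbc sequences are spelled through BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO/BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI instead of literal %N/%HN which, judging by the names, select the register holding the low/high half of the value independently of byte order (the literal %N/%HN spelling always names the lower/higher register of the pair). Second, the storage is passed as a "+Q" memory operand rather than as an address in an "r" register, which lets the blanket "memory" clobber be dropped. A toy (non-Boost) exchange sketch showing the operand style the new code uses; it assumes ARMv6+ with LDREX/STREX:

    unsigned int toy_exchange(unsigned int volatile& storage, unsigned int v)
    {
        unsigned int original, tmp;
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldrex %[original], %[storage]\n\t"       // original = storage
            "strex %[tmp], %[value], %[storage]\n\t"  // storage = v; tmp = 0 on success
            "teq %[tmp], #0\n\t"
            "bne 1b\n\t"                              // retry if another thread intervened
            : [original] "=&r" (original), [tmp] "=&r" (tmp),
              [storage] "+Q" (storage)                // memory operand: no "memory" clobber needed
            : [value] "r" (v)
            : "cc"
        );
        return original;
    }
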
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
template< typename Base, bool Signed >
-struct gcc_arm_extra_operations< Base, 8u, Signed > :
- public generic_extra_operations< Base, 8u, Signed >
+struct extra_operations_gcc_arm< Base, 8u, Signed > :
+ public extra_operations_generic< Base, 8u, Signed >
{
- typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "mvn %2, %1\n" // result = NOT original
- "mvn %H2, %H1\n"
- "adds %2, %2, #1\n" // result = result + 1
- "adc %H2, %H2, #0\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "mvn %2, %1\n\t" // result = NOT original
+ "mvn %H2, %H1\n\t"
+ "adds " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", #1\n\t" // result = result + 1
+ "adc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", #0\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage) // %3
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "mvn %2, %1\n" // result = NOT original
- "mvn %H2, %H1\n"
- "adds %2, %2, #1\n" // result = result + 1
- "adc %H2, %H2, #0\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "mvn %2, %1\n\t" // result = NOT original
+ "mvn %H2, %H1\n\t"
+ "adds " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", #1\n\t" // result = result + 1
+ "adc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", #0\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage) // %3
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "adds %2, %1, %4\n" // result = original + value
- "adc %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "adds " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(4) "\n\t" // result = original + value
+ "adc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(4) "\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "subs %2, %1, %4\n" // result = original - value
- "sbc %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "subs " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(4) "\n\t" // result = original - value
+ "sbc " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(2) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(1) ", " BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(4) "\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "and %2, %1, %4\n" // result = original & value
- "and %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "and %2, %1, %4\n\t" // result = original & value
+ "and %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "orr %2, %1, %4\n" // result = original | value
- "orr %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "orr %2, %1, %4\n\t" // result = original | value
+ "orr %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "eor %2, %1, %4\n" // result = original ^ value
- "eor %H2, %H1, %H4\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "eor %2, %1, %4\n\t" // result = original ^ value
+ "eor %H2, %H1, %H4\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage), // %3
- "r" (v) // %4
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ : "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "mvn %2, %1\n" // result = NOT original
- "mvn %H2, %H1\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "mvn %2, %1\n\t" // result = NOT original
+ "mvn %H2, %H1\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage) // %3
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_arm_operations_base::fence_before(order);
+ core_arch_operations_gcc_arm_base::fence_before(order);
storage_type original, result;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "1:\n"
- "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
- "mvn %2, %1\n" // result = NOT original
- "mvn %H2, %H1\n"
- "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
- "teq %0, #0\n" // flags = tmp==0
- "bne 1b\n" // if (!flags.equal) goto retry
+ "1:\n\t"
+ "ldrexd %1, %H1, %3\n\t" // original = *(&storage)
+ "mvn %2, %1\n\t" // result = NOT original
+ "mvn %H2, %H1\n\t"
+ "strexd %0, %2, %H2, %3\n\t" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n\t" // flags = tmp==0
+ "bne 1b\n\t" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
"=&r" (original), // %1
- "=&r" (result) // %2
- : "r" (&storage) // %3
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ "=&r" (result), // %2
+ "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_arm_operations_base::fence_after(order);
+ core_arch_operations_gcc_arm_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 8u, Signed, true > :
- public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 8u, Signed > >
+ public extra_operations_gcc_arm_common< extra_operations_gcc_arm< Base, 8u, Signed > >
{
};
@@ -1108,4 +1114,6 @@ struct extra_operations< Base, 8u, Signed, true > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
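
Everything in this file feeds the extra_operations<> entry points behind boost::atomic's extension members (fetch_negate, fetch_complement, opaque_negate, and so on). Where no architecture specialization applies, extra_operations_generic falls back to a compare-exchange loop with the same semantics; a minimal sketch of that fallback shape, written against the public Boost.Atomic API (illustrative only, not the library's actual implementation):

    #include <boost/atomic.hpp>
    #include <boost/memory_order.hpp>

    // What "fetch_negate" computes, expressed as a CAS retry loop.
    int fetch_negate_via_cas(boost::atomic<int>& a)
    {
        int expected = a.load(boost::memory_order_relaxed);
        // Keep retrying until no other thread intervenes between the load
        // and the store, mirroring the ldrex/strex retry loops above.
        while (!a.compare_exchange_weak(expected, -expected,
                                        boost::memory_order_seq_cst,
                                        boost::memory_order_relaxed))
        {
        }
        return expected; // the value observed before negation
    }
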
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
index dc4bbdbf74..476c60e332 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
@@ -17,11 +17,13 @@
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/gcc_ppc_asm_common.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -32,7 +34,7 @@ namespace atomics {
namespace detail {
template< typename Base >
-struct gcc_ppc_extra_operations_common :
+struct extra_operations_gcc_ppc_common :
public Base
{
typedef Base base_type;
@@ -85,192 +87,192 @@ struct gcc_ppc_extra_operations_common :
};
template< typename Base, std::size_t Size, bool Signed >
-struct gcc_ppc_extra_operations;
+struct extra_operations_gcc_ppc;
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
template< typename Base, bool Signed >
-struct gcc_ppc_extra_operations< Base, 1u, Signed > :
- public generic_extra_operations< Base, 1u, Signed >
+struct extra_operations_gcc_ppc< Base, 1u, Signed > :
+ public extra_operations_generic< Base, 1u, Signed >
{
- typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stbcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 1u, Signed, true > :
- public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 1u, Signed > >
+ public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 1u, Signed > >
{
};
@@ -279,180 +281,180 @@ struct extra_operations< Base, 1u, Signed, true > :
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
-struct gcc_ppc_extra_operations< Base, 2u, Signed > :
- public generic_extra_operations< Base, 2u, Signed >
+struct extra_operations_gcc_ppc< Base, 2u, Signed > :
+ public extra_operations_generic< Base, 2u, Signed >
{
- typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"sthcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
@@ -460,374 +462,374 @@ struct gcc_ppc_extra_operations< Base, 2u, Signed > :
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
-struct gcc_ppc_extra_operations< Base, 4u, Signed > :
- public generic_extra_operations< Base, 4u, Signed >
+struct extra_operations_gcc_ppc< Base, 4u, Signed > :
+ public extra_operations_generic< Base, 4u, Signed >
{
- typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stwcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
- public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 4u, Signed > >
+ public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 4u, Signed > >
{
};
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
template< typename Base, bool Signed >
-struct gcc_ppc_extra_operations< Base, 8u, Signed > :
- public generic_extra_operations< Base, 8u, Signed >
+struct extra_operations_gcc_ppc< Base, 8u, Signed > :
+ public extra_operations_generic< Base, 8u, Signed >
{
- typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- gcc_ppc_operations_base::fence_before(order);
+ core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
- "1:\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stdcx. %1,%y2\n\t"
- "bne- 1b\n\t"
+ BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
- gcc_ppc_operations_base::fence_after(order);
+ core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 8u, Signed, true > :
- public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 8u, Signed > >
+ public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 8u, Signed > >
{
};
@@ -837,4 +839,6 @@ struct extra_operations< Base, 8u, Signed, true > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_PPC_INCLUDED_
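Note: the PPC hunks above are mechanical renames (gcc_ppc_extra_operations -> extra_operations_gcc_ppc, gcc_ppc_operations_base -> core_arch_operations_gcc_ppc_base) plus a switch from literal "1:"/"bne- 1b" to the BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL/BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP macros, presumably so the same source can emit either numeric local labels or raw byte offsets (the "-12" fallback argument) depending on assembler support. The loop structure itself is unchanged: a load-reserve/store-conditional (lwarx/stwcx.) retry. A minimal portable sketch of the same retry pattern, with illustrative names only (not the Boost implementation):

// Illustrative sketch only: the lwarx/stwcx. loops in this file are the
// hardware form of a compare-exchange retry loop. On PPC the compiler lowers
// compare_exchange_weak to exactly such an LL/SC pair.
#include <atomic>
#include <cstdint>

std::uint32_t fetch_and_sketch(std::atomic<std::uint32_t>& storage,
                               std::uint32_t v,
                               std::memory_order order) noexcept
{
    std::uint32_t expected = storage.load(std::memory_order_relaxed);
    // Retry until the store-conditional succeeds, i.e. no other thread wrote
    // to the reservation granule between the load and the store.
    while (!storage.compare_exchange_weak(expected, expected & v,
                                          order, std::memory_order_relaxed))
    {
        // expected is refreshed with the current value on each failure.
    }
    return expected & v; // corresponds to the "result" asm operand
}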
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp
index ee2cd02a88..a2bdec2f7d 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp
@@ -15,11 +15,13 @@
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
#include <cstddef>
+#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -29,103 +31,13 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base >
-struct gcc_x86_extra_operations_common :
- public Base
-{
- typedef Base base_type;
- typedef typename base_type::storage_type storage_type;
-
- static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return static_cast< storage_type >(Base::fetch_add(storage, v, order) + v);
- }
-
- static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return static_cast< storage_type >(Base::fetch_sub(storage, v, order) - v);
- }
-
- static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
- {
- bool res;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; bts %[bit_number], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccc" (res)
- : [bit_number] "Kq" (bit_number)
- : "memory"
- );
-#else
- __asm__ __volatile__
- (
- "lock; bts %[bit_number], %[storage]\n\t"
- "setc %[result]\n\t"
- : [storage] "+m" (storage), [result] "=q" (res)
- : [bit_number] "Kq" (bit_number)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif
- return res;
- }
-
- static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
- {
- bool res;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; btr %[bit_number], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccc" (res)
- : [bit_number] "Kq" (bit_number)
- : "memory"
- );
-#else
- __asm__ __volatile__
- (
- "lock; btr %[bit_number], %[storage]\n\t"
- "setc %[result]\n\t"
- : [storage] "+m" (storage), [result] "=q" (res)
- : [bit_number] "Kq" (bit_number)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif
- return res;
- }
-
- static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
- {
- bool res;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; btc %[bit_number], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccc" (res)
- : [bit_number] "Kq" (bit_number)
- : "memory"
- );
-#else
- __asm__ __volatile__
- (
- "lock; btc %[bit_number], %[storage]\n\t"
- "setc %[result]\n\t"
- : [storage] "+m" (storage), [result] "=q" (res)
- : [bit_number] "Kq" (bit_number)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif
- return res;
- }
-};
-
template< typename Base, bool Signed >
struct extra_operations< Base, 1u, Signed, true > :
- public gcc_x86_extra_operations_common< Base >
+ public extra_operations_generic< Base, 1u, Signed >
{
- typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u >::type temp_storage_type;
+ typedef typename storage_traits< 4u >::type temp_storage_type;
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
@@ -503,11 +415,11 @@ struct extra_operations< Base, 1u, Signed, true > :
template< typename Base, bool Signed >
struct extra_operations< Base, 2u, Signed, true > :
- public gcc_x86_extra_operations_common< Base >
+ public extra_operations_generic< Base, 2u, Signed >
{
- typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u >::type temp_storage_type;
+ typedef typename storage_traits< 4u >::type temp_storage_type;
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
@@ -881,13 +793,85 @@ struct extra_operations< Base, 2u, Signed, true > :
#endif
return res;
}
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btsw %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btsw %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btrw %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btrw %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btcw %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btcw %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" ((uint16_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
};
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
- public gcc_x86_extra_operations_common< Base >
+ public extra_operations_generic< Base, 4u, Signed >
{
- typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
@@ -1262,15 +1246,87 @@ struct extra_operations< Base, 4u, Signed, true > :
#endif
return res;
}
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btsl %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btsl %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btrl %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btrl %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btcl %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btcl %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint32_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
};
#if defined(__x86_64__)
template< typename Base, bool Signed >
struct extra_operations< Base, 8u, Signed, true > :
- public gcc_x86_extra_operations_common< Base >
+ public extra_operations_generic< Base, 8u, Signed >
{
- typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
@@ -1645,6 +1701,78 @@ struct extra_operations< Base, 8u, Signed, true > :
#endif
return res;
}
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btsq %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btsq %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btrq %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btrq %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btcq %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btcq %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kr" ((uint64_t)bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
};
#endif // defined(__x86_64__)
@@ -1653,4 +1781,6 @@ struct extra_operations< Base, 8u, Signed, true > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
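Note: in extra_ops_gcc_x86.hpp the shared gcc_x86_extra_operations_common helper is dissolved; each size specialization now derives from extra_operations_generic and carries width-suffixed lock bts/btr/btc variants (btsw/btsl/btsq) with the bit number cast to the matching width. Where flag outputs are available the carry flag is bound directly via "=@ccc"; otherwise a setc fallback is used. A stand-alone sketch of the 32-bit pattern, assuming GCC/Clang inline asm (the feature macro spelled below is the compiler's, not Boost's):

// Sketch of the bit_test_and_set pattern from the diff (32-bit operand
// width). btsl copies the addressed bit into CF and then sets it, so the
// carry flag holds the bit's previous value and no comparison is needed.
#include <cstdint>

inline bool bit_test_and_set_sketch(volatile std::uint32_t& storage,
                                    unsigned int bit_number) noexcept
{
    bool res;
#if defined(__GCC_ASM_FLAG_OUTPUTS__)
    __asm__ __volatile__
    (
        "lock; btsl %[bit], %[mem]\n\t"
        : [mem] "+m" (storage), [res] "=@ccc" (res)
        : [bit] "Kr" (bit_number)
        : "memory"
    );
#else
    __asm__ __volatile__
    (
        "lock; btsl %[bit], %[mem]\n\t"
        "setc %[res]\n\t"
        : [mem] "+m" (storage), [res] "=q" (res)
        : [bit] "Kr" (bit_number)
        : "cc", "memory"
    );
#endif
    return res;
}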
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_generic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_generic.hpp
index 43842628a2..815b04c4bc 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_generic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_generic.hpp
@@ -17,33 +17,27 @@
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/integral_extend.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/integral_conversions.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// unary minus operator applied to unsigned type, result still unsigned
-#pragma warning(disable: 4146)
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
//! Generic implementation of extra operations
template< typename Base, std::size_t Size, bool Signed, bool = Base::full_cas_based >
-struct generic_extra_operations :
+struct extra_operations_generic :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< Size >::type emulated_storage_type;
+ typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
@@ -195,12 +189,12 @@ struct generic_extra_operations :
//! Specialization for cases when the platform only natively supports CAS
template< typename Base, std::size_t Size, bool Signed >
-struct generic_extra_operations< Base, Size, Signed, true > :
+struct extra_operations_generic< Base, Size, Signed, true > :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< Size >::type emulated_storage_type;
+ typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
@@ -387,7 +381,7 @@ struct generic_extra_operations< Base, Size, Signed, true > :
// Default extra_operations template definition will be used unless specialized for a specific platform
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, true > :
- public generic_extra_operations< Base, Size, Signed >
+ public extra_operations_generic< Base, Size, Signed >
{
};
@@ -395,8 +389,6 @@ struct extra_operations< Base, Size, Signed, true > :
} // namespace atomics
} // namespace boost
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
+#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
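Note: extra_ops_generic.hpp gets the same renames (generic_extra_operations -> extra_operations_generic, make_storage_type -> storage_traits), and the local MSVC warning-4146 push/pop is dropped, presumably because the new header.hpp/footer.hpp pair centralizes warning suppression. Warning C4146 fires on the unsigned-negation idiom the generic backend relies on; a sketch of fetch_negate in that style (illustrative, not the Boost source):

// Sketch: negation of an unsigned value computed as 0u - x, which is
// well-defined modular arithmetic but is exactly what MSVC C4146 flags.
#include <atomic>
#include <cstdint>

std::uint32_t fetch_negate_sketch(std::atomic<std::uint32_t>& storage,
                                  std::memory_order order) noexcept
{
    std::uint32_t old = storage.load(std::memory_order_relaxed);
    while (!storage.compare_exchange_weak(
               old, static_cast<std::uint32_t>(0u - old),
               order, std::memory_order_relaxed))
    {
    }
    return old; // fetch_* operations return the pre-operation value
}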
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp
index b8eb5bcb31..bc1f4dd281 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp
@@ -18,10 +18,10 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -35,9 +35,9 @@ namespace detail {
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
- public generic_extra_operations< Base, 4u, Signed >
+ public extra_operations_generic< Base, 4u, Signed >
{
- typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
@@ -103,4 +103,6 @@ struct extra_operations< Base, 4u, Signed, true > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
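Note: extra_ops_msvc_arm.hpp sees only the rename and the header/footer swap; judging by its includes, its bit_test_and_set/bit_test_and_reset remain built on interlocked primitives rather than a dedicated bit-test instruction. For reference, the mask-based construction such backends typically use (a conceptual sketch, not the Boost source):

// Conceptual sketch: bit_test_and_set expressed via fetch_or, the usual
// fallback on targets without a native bts-style instruction.
#include <atomic>
#include <cstdint>

bool bit_test_and_set_generic(std::atomic<std::uint32_t>& storage,
                              unsigned int bit_number,
                              std::memory_order order) noexcept
{
    const std::uint32_t mask = std::uint32_t(1) << bit_number;
    // fetch_or returns the previous value; masking it out tells whether the
    // bit was already set before the operation.
    return (storage.fetch_or(mask, order) & mask) != 0;
}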
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp
index 17451a83d6..78c29e12b1 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp
@@ -18,104 +18,28 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
-#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// frame pointer register 'ebx' modified by inline assembly code
-#pragma warning(disable: 4731)
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
-#if defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
-
-template< typename Base, std::size_t Size, bool Signed >
-struct msvc_x86_extra_operations_common :
- public generic_extra_operations< Base, Size, Signed >
-{
- typedef generic_extra_operations< Base, Size, Signed > base_type;
- typedef typename base_type::storage_type storage_type;
-
-#if defined(BOOST_ATOMIC_INTERLOCKED_BTS)
- static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
- {
- return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
- }
-#else
- static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
- {
- base_type::fence_before(order);
- bool result;
- __asm
- {
- mov edx, storage
- mov eax, bit_number
- lock bts [edx], eax
- setc result
- };
- base_type::fence_after(order);
- return result;
- }
-#endif
-
-#if defined(BOOST_ATOMIC_INTERLOCKED_BTR)
- static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
- {
- return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
- }
-#else
- static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
- {
- base_type::fence_before(order);
- bool result;
- __asm
- {
- mov edx, storage
- mov eax, bit_number
- lock btr [edx], eax
- setc result
- };
- base_type::fence_after(order);
- return result;
- }
-#endif
-
#if defined(_M_IX86)
- static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
- {
- base_type::fence_before(order);
- bool result;
- __asm
- {
- mov edx, storage
- mov eax, bit_number
- lock btc [edx], eax
- setc result
- };
- base_type::fence_after(order);
- return result;
- }
-#endif
-};
template< typename Base, bool Signed >
struct extra_operations< Base, 1u, Signed, true > :
- public msvc_x86_extra_operations_common< Base, 1u, Signed >
+ public extra_operations_generic< Base, 1u, Signed >
{
- typedef msvc_x86_extra_operations_common< Base, 1u, Signed > base_type;
+ typedef extra_operations_generic< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
-#if defined(_M_IX86)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -491,17 +415,15 @@ struct extra_operations< Base, 1u, Signed, true > :
base_type::fence_after(order);
return result;
}
-#endif // defined(_M_IX86)
};
template< typename Base, bool Signed >
struct extra_operations< Base, 2u, Signed, true > :
- public msvc_x86_extra_operations_common< Base, 2u, Signed >
+ public extra_operations_generic< Base, 2u, Signed >
{
- typedef msvc_x86_extra_operations_common< Base, 2u, Signed > base_type;
+ typedef extra_operations_generic< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
-#if defined(_M_IX86)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -877,14 +799,62 @@ struct extra_operations< Base, 2u, Signed, true > :
base_type::fence_after(order);
return result;
}
-#endif // defined(_M_IX86)
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock bts word ptr [edx], ax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btr word ptr [edx], ax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btc word ptr [edx], ax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
};
+#endif // defined(_M_IX86)
+
+#if defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
+
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
- public msvc_x86_extra_operations_common< Base, 4u, Signed >
+ public extra_operations_generic< Base, 4u, Signed >
{
- typedef msvc_x86_extra_operations_common< Base, 4u, Signed > base_type;
+ typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
#if defined(_M_IX86)
@@ -1263,7 +1233,66 @@ struct extra_operations< Base, 4u, Signed, true > :
base_type::fence_after(order);
return result;
}
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btc dword ptr [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
#endif // defined(_M_IX86)
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS)
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+ }
+#elif defined(_M_IX86)
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock bts dword ptr [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+ }
+#elif defined(_M_IX86)
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btr dword ptr [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
};
#endif // defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
@@ -1272,9 +1301,9 @@ struct extra_operations< Base, 4u, Signed, true > :
template< typename Base, bool Signed >
struct extra_operations< Base, 8u, Signed, true > :
- public generic_extra_operations< Base, 8u, Signed >
+ public extra_operations_generic< Base, 8u, Signed >
{
- typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef extra_operations_generic< Base, 8u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
@@ -1294,8 +1323,6 @@ struct extra_operations< Base, 8u, Signed, true > :
} // namespace atomics
} // namespace boost
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
+#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
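Note: the MSVC x86 file is restructured rather than just renamed: the msvc_x86_extra_operations_common helper is removed, the 1u/2u specializations (with their inline __asm bit tests) are now compiled only under _M_IX86, and the 4u specialization prefers BOOST_ATOMIC_INTERLOCKED_BTS/BTR when defined, falling back to 32-bit __asm otherwise. Assuming those macros wrap the documented MSVC intrinsics, the preferred path is equivalent to this sketch:

// Sketch of the intrinsic-based path, assuming BOOST_ATOMIC_INTERLOCKED_BTS
// and _BTR map to the documented MSVC intrinsics below. MSVC-only;
// <intrin.h> declares them, and both return the previous value of the bit.
#include <intrin.h>

inline bool bit_test_and_set_msvc(long volatile& storage, long bit) noexcept
{
    return _interlockedbittestandset(&storage, bit) != 0;
}

inline bool bit_test_and_reset_msvc(long volatile& storage, long bit) noexcept
{
    return _interlockedbittestandreset(&storage, bit) != 0;
}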
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_operations.hpp
new file mode 100644
index 0000000000..912b5db6d6
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_operations.hpp
@@ -0,0 +1,41 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_operations.hpp
+ *
+ * This header defines architecture-specific fence atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/fence_arch_ops_)
+#else
+#include <boost/atomic/detail/fence_operations_emulated.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+typedef fence_operations_emulated fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp
new file mode 100644
index 0000000000..6a8ce4a5db
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp
@@ -0,0 +1,60 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_aarch32.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for AArch32
+struct fence_arch_operations_gcc_aarch32
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ {
+ if (order == memory_order_consume || order == memory_order_acquire)
+ __asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
+ else
+ __asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_aarch32 fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
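Note: the new fence_arch_ops_gcc_aarch32.hpp (and its AArch64 twin below) picks the cheaper dmb ishld for acquire/consume fences and the full dmb ish for anything stronger, while signal_fence degrades to a pure compiler barrier. For comparison, the standard-library spelling of the same fences (a sketch; the per-order barrier choice is the point, and the instruction annotations are an assumption about typical ARMv8 codegen, not a guarantee):

// Sketch: the hand-written struct above mirrors the standard fences.
#include <atomic>

void fence_examples() noexcept
{
    std::atomic_thread_fence(std::memory_order_acquire); // typically dmb ishld
    std::atomic_thread_fence(std::memory_order_seq_cst); // typically dmb ish
    std::atomic_signal_fence(std::memory_order_seq_cst); // compiler barrier only
}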
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp
new file mode 100644
index 0000000000..66f374a2b7
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_aarch64.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for AArch64
+struct fence_arch_operations_gcc_aarch64
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ {
+ if (order == memory_order_consume || order == memory_order_acquire)
+ __asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
+ else
+ __asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_aarch64 fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp
new file mode 100644
index 0000000000..0c81ed237a
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp
@@ -0,0 +1,53 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_alpha.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for Alpha
+struct fence_arch_operations_gcc_alpha
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_alpha fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp
new file mode 100644
index 0000000000..537a5c7cd5
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp
@@ -0,0 +1,90 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_arm.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/gcc_arm_asm_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for legacy ARM
+struct fence_arch_operations_gcc_arm
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ // A memory barrier is effected using a "co-processor 15" instruction,
+ // though a separate assembler mnemonic is available for it in v7.
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
+        // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction, such as "ish" or "#11".
+        // As a workaround we have to inject the encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
+        // Since we cannot detect the binutils version at compile time, we always have to use this hack.
+ __asm__ __volatile__
+ (
+#if defined(__thumb2__)
+ ".short 0xF3BF, 0x8F5B\n\t" // dmb ish
+#else
+ ".word 0xF57FF05B\n\t" // dmb ish
+#endif
+ :
+ :
+ : "memory"
+ );
+#else
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "mcr p15, 0, r0, c7, c10, 5\n\t"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : "=&l" (tmp)
+ :
+ : "memory"
+ );
+#endif
+ }
+};
+
+typedef fence_arch_operations_gcc_arm fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
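
On ARMv7 (and Thumb-2) the raw ".short"/".word" bytes injected above encode exactly the "dmb ish" instruction; the byte-injection workaround exists only because old binutils rejected the mnemonic's operands. A sketch of the equivalent code when the assembler can be trusted (hypothetical helper, not part of Boost):

    // Sketch, ARMv7 only: what hardware_full_fence amounts to with a modern assembler.
    inline void armv7_full_fence_sketch() noexcept
    {
        __asm__ __volatile__ ("dmb ish" ::: "memory"); // the same instruction the encoded bytes emit
    }
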
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp
new file mode 100644
index 0000000000..e161ae4f17
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp
@@ -0,0 +1,68 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_ppc.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for PowerPC
+struct fence_arch_operations_gcc_ppc
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ {
+#if defined(__powerpc64__) || defined(__PPC64__)
+ if (order != memory_order_seq_cst)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ else
+ __asm__ __volatile__ ("sync" ::: "memory");
+#else
+ __asm__ __volatile__ ("sync" ::: "memory");
+#endif
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ {
+#if defined(__ibmxl__) || defined(__IBMCPP__)
+ __fence();
+#else
+ __asm__ __volatile__ ("" ::: "memory");
+#endif
+ }
+ }
+};
+
+typedef fence_arch_operations_gcc_ppc fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
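
On 64-bit PowerPC the code above uses the lightweight "lwsync" for everything except seq_cst, which needs the full "sync"; 32-bit builds use "sync" unconditionally since older cores may lack lwsync. A condensed sketch of that decision, assuming GCC inline assembly on ppc64 (illustrative name only):

    // Sketch: lwsync suffices for acquire/release/acq_rel fences; seq_cst requires sync.
    inline void ppc64_thread_fence_sketch(bool seq_cst) noexcept
    {
        if (seq_cst)
            __asm__ __volatile__ ("sync" ::: "memory");   // full barrier
        else
            __asm__ __volatile__ ("lwsync" ::: "memory"); // lightweight barrier
    }
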
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp
new file mode 100644
index 0000000000..5c18111ee5
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp
@@ -0,0 +1,70 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_sparc.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for SPARC
+struct fence_arch_operations_gcc_sparc
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_release:
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+ break;
+ case memory_order_acq_rel:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
+ break;
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ break;
+ case memory_order_relaxed:
+ default:
+ break;
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_sparc fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp
new file mode 100644
index 0000000000..0cb61f617c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp
@@ -0,0 +1,69 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_x86.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for x86
+struct fence_arch_operations_gcc_x86
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ {
+ // We could generate mfence for a seq_cst fence here, but a dummy lock-prefixed instruction is enough
+ // and is faster than mfence on most modern x86 CPUs (as of 2020).
+        // Note that we can apply the atomic operation to any location, so we pick one such that:
+ // - It is not shared with other threads. A variable on the stack suits this well.
+ // - It is likely in cache. Being close to the top of the stack fits this well.
+ // - It does not alias existing data on the stack, so that we don't introduce a false data dependency.
+ // See some performance data here: https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
+ // Unfortunately, to make tools like valgrind happy, we have to initialize the dummy, which is
+ // otherwise not needed.
+ unsigned char dummy = 0u;
+ __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) : : "memory");
+ }
+ else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
+ {
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_x86 fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
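
The comment above is the key design note for x86 seq_cst fences: any lock-prefixed RMW instruction is a full barrier, and applying one to a fresh stack byte avoids both cache misses and false data dependencies. A side-by-side sketch of the two strategies (hypothetical helper names; only the second matches what the code above does):

    // Sketch 1: the obvious implementation, often slower on modern CPUs.
    inline void seq_cst_fence_mfence_sketch() noexcept
    {
        __asm__ __volatile__ ("mfence" ::: "memory");
    }

    // Sketch 2: the dummy lock-prefixed RMW used above; the stack byte is
    // thread-local, likely cached, and aliases no live data.
    inline void seq_cst_fence_locked_op_sketch() noexcept
    {
        unsigned char dummy = 0u; // initialized only to keep tools like valgrind quiet
        __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) : : "memory");
    }
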
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp
new file mode 100644
index 0000000000..6647d00303
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp
@@ -0,0 +1,66 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_msvc_arm.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+extern "C" void __dmb(unsigned int);
+#if defined(BOOST_MSVC)
+#pragma intrinsic(__dmb)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for ARM
+struct fence_arch_operations_msvc_arm
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order != memory_order_relaxed)
+ hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
+ }
+};
+
+typedef fence_arch_operations_msvc_arm fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp
new file mode 100644
index 0000000000..8ead6df018
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp
@@ -0,0 +1,66 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_msvc_x86.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for x86
+struct fence_arch_operations_msvc_x86
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ {
+        // See the comment in fence_arch_ops_gcc_x86.hpp as to why we're not using mfence here.
+ // We're not using __faststorefence() here because it generates an atomic operation
+ // on [rsp]/[esp] location, which may alias valid data and cause false data dependency.
+ boost::uint32_t dummy;
+ BOOST_ATOMIC_INTERLOCKED_INCREMENT(&dummy);
+ }
+ else if (order != memory_order_relaxed)
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+};
+
+typedef fence_arch_operations_msvc_x86 fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations.hpp
new file mode 100644
index 0000000000..ec87126f37
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations.hpp
@@ -0,0 +1,41 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_operations.hpp
+ *
+ * This header defines fence atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
+#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/fence_ops_)
+#else
+#include <boost/atomic/detail/fence_arch_operations.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+typedef fence_arch_operations fence_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
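
fence_operations.hpp is the dispatch point: if the platform configures a core backend header (gcc_atomic, gcc_sync, linux_arm, windows, ...), that header defines fence_operations; otherwise the architecture-level fence_arch_operations is re-exported under the same name. Either way, calling code is backend-agnostic. A hedged sketch of such a call site (the wrapper name is hypothetical, not Boost's public API):

    #include <boost/memory_order.hpp>
    #include <boost/atomic/detail/fence_operations.hpp>

    // Sketch: whichever backend was selected, the call looks the same.
    inline void atomic_thread_fence_sketch(boost::memory_order order) noexcept
    {
        boost::atomics::detail::fence_operations::thread_fence(order);
    }
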
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations_emulated.hpp
new file mode 100644
index 0000000000..adcb2ee183
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_operations_emulated.hpp
@@ -0,0 +1,50 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_operations_emulated.hpp
+ *
+ * This header contains implementation of the \c fence_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations based on lock pool
+struct fence_operations_emulated
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order) BOOST_NOEXCEPT
+ {
+ atomics::detail::lock_pool::thread_fence();
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT
+ {
+ atomics::detail::lock_pool::signal_fence();
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
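
When atomics are emulated through a lock pool, a fence cannot be a CPU instruction; it has to synchronize with the locks guarding the emulated objects. One way to model the idea (a simplified sketch only; the real lock_pool is a hashed array of locks and its thread_fence differs in detail):

    #include <mutex>

    namespace sketch {

    std::mutex pool_lock; // stand-in for one lock of the pool

    // Acquiring and releasing a lock is itself a synchronization point, so a
    // lock/unlock round trip can serve as a fence for lock-based atomics.
    inline void emulated_thread_fence()
    {
        std::lock_guard< std::mutex > guard(pool_lock);
    }

    } // namespace sketch
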
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_atomic.hpp
new file mode 100644
index 0000000000..5fa22779d1
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_atomic.hpp
@@ -0,0 +1,75 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_ops_gcc_atomic.hpp
+ *
+ * This header contains implementation of the \c fence_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/fence_arch_operations.hpp>
+#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__INTEL_COMPILER)
+// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
+// for Intel Compiler.
+// In debug builds the compiler does not inline any functions, so basically
+// every atomic function call results in this warning. I don't know any other
+// way to selectively disable just this one warning.
+#pragma system_header
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations based on gcc __atomic* intrinsics
+struct fence_operations_gcc_atomic
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__x86_64__) || defined(__i386__)
+ if (order != memory_order_seq_cst)
+ {
+ __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
+ }
+ else
+ {
+ // gcc, clang, icc and probably other compilers generate mfence for a seq_cst fence,
+            // while a dummy lock-prefixed instruction would be enough and faster. See the comment in fence_arch_ops_gcc_x86.hpp.
+ fence_arch_operations::thread_fence(order);
+ }
+#else
+ __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
+#endif
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
+ }
+};
+
+typedef fence_operations_gcc_atomic fence_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_sync.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_sync.hpp
new file mode 100644
index 0000000000..8fa03acc90
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_gcc_sync.hpp
@@ -0,0 +1,53 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_ops_gcc_sync.hpp
+ *
+ * This header contains implementation of the \c fence_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations based on gcc __sync* intrinsics
+struct fence_operations_gcc_sync
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_operations_gcc_sync fence_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_linux_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_linux_arm.hpp
new file mode 100644
index 0000000000..218ee30378
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_linux_arm.hpp
@@ -0,0 +1,64 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009, 2011 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * Linux-specific code by Phil Endecott
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_ops_linux_arm.hpp
+ *
+ * This header contains implementation of the \c fence_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations based on Linux-specific system routines
+struct fence_operations_linux_arm
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ // See the comment in core_ops_linux_arm.hpp regarding the function pointer below
+ typedef void (*kernel_dmb_t)(void);
+ ((kernel_dmb_t)0xffff0fa0)();
+ }
+};
+
+typedef fence_operations_linux_arm fence_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
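
The magic constant 0xffff0fa0 above is the address of the Linux ARM "kuser" memory barrier helper: kernel-provided code mapped at a fixed address into every 32-bit process, which executes whatever barrier the running CPU requires (see Documentation/arm/kernel_user_helpers.rst in the kernel tree). A sketch with the cast spelled out (illustrative names, not part of Boost):

    // Sketch: invoke the kernel's __kuser_memory_barrier helper on 32-bit ARM Linux.
    typedef void (*kuser_memory_barrier_t)(void);

    inline void linux_arm_barrier_sketch() noexcept
    {
        kuser_memory_barrier_t barrier =
            reinterpret_cast< kuser_memory_barrier_t >(0xffff0fa0);
        barrier(); // the kernel selects the correct barrier for the CPU
    }
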
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_windows.hpp
new file mode 100644
index 0000000000..68f02bd0f9
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fence_ops_windows.hpp
@@ -0,0 +1,67 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_ops_windows.hpp
+ *
+ * This header contains implementation of the \c fence_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations based on Windows-specific system calls or intrinsics
+struct fence_operations_windows
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ boost::uint32_t tmp;
+ BOOST_ATOMIC_INTERLOCKED_INCREMENT(&tmp);
+ }
+};
+
+typedef fence_operations_windows fence_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/footer.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/footer.hpp
new file mode 100644
index 0000000000..513471921a
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/footer.hpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright Andrey Semashev 2020.
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#if !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
+
+#if defined(BOOST_MSVC)
+
+#pragma warning(pop)
+
+#elif defined(BOOST_GCC) && BOOST_GCC >= 40600
+
+#pragma GCC diagnostic pop
+
+#elif defined(BOOST_CLANG)
+
+#pragma clang diagnostic pop
+
+#endif
+
+#endif // !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_operations_fwd.hpp
index 8696de31cf..c83a3076dd 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_operations_fwd.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_operations_fwd.hpp
@@ -16,6 +16,7 @@
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -25,11 +26,13 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base, typename Value, std::size_t Size, bool = Base::is_always_lock_free >
+template< typename Base, typename Value, std::size_t Size = sizeof(typename Base::storage_type), bool = Base::is_always_lock_free >
struct fp_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_emulated.hpp
index a87f1814b3..d9d77e33e7 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_emulated.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_emulated.hpp
@@ -15,11 +15,12 @@
#define BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
#include <cstddef>
+#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
#include <boost/atomic/detail/fp_operations_fwd.hpp>
-#include <boost/atomic/detail/lockpool.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -29,29 +30,32 @@ namespace boost {
namespace atomics {
namespace detail {
-//! Generic implementation of floating point operations
+//! Emulated implementation of floating point operations
template< typename Base, typename Value, std::size_t Size >
-struct emulated_fp_operations :
+struct fp_operations_emulated :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
+ typedef typename base_type::scoped_lock scoped_lock;
- static BOOST_FORCEINLINE value_type fetch_add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ static value_type fetch_add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val + v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return old_val;
}
- static BOOST_FORCEINLINE value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ static value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
+ scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val - v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
@@ -61,7 +65,7 @@ struct emulated_fp_operations :
template< typename Base, typename Value, std::size_t Size >
struct fp_operations< Base, Value, Size, false > :
- public emulated_fp_operations< Base, Value, Size >
+ public fp_operations_emulated< Base, Value, Size >
{
};
@@ -69,4 +73,6 @@ struct fp_operations< Base, Value, Size, false > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_generic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_generic.hpp
index b83e85a359..05979f25f1 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_generic.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/fp_ops_generic.hpp
@@ -18,8 +18,9 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/fp_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -31,7 +32,7 @@ namespace detail {
//! Generic implementation of floating point operations
template< typename Base, typename Value, std::size_t Size >
-struct generic_fp_operations :
+struct fp_operations_generic :
public Base
{
typedef Base base_type;
@@ -72,7 +73,7 @@ struct generic_fp_operations :
// Default fp_operations template definition will be used unless specialized for a specific platform
template< typename Base, typename Value, std::size_t Size >
struct fp_operations< Base, Value, Size, true > :
- public generic_fp_operations< Base, Value, Size >
+ public fp_operations_generic< Base, Value, Size >
{
};
@@ -80,4 +81,6 @@ struct fp_operations< Base, Value, Size, true > :
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/futex.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/futex.hpp
new file mode 100644
index 0000000000..39d20b50e4
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/futex.hpp
@@ -0,0 +1,154 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/futex.hpp
+ *
+ * This header defines wrappers around futex syscall.
+ *
+ * http://man7.org/linux/man-pages/man2/futex.2.html
+ * https://man.openbsd.org/futex
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__linux__) || defined(__OpenBSD__) || defined(__NETBSD__) || defined(__NetBSD__)
+
+#include <sys/syscall.h>
+
+#if defined(SYS_futex)
+#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS_futex
+#elif defined(SYS_futex_time64)
+// On some 32-bit targets (e.g. riscv32) SYS_futex is not defined and instead SYS_futex_time64 is implemented,
+// which is equivalent to SYS_futex but uses 64-bit time_t.
+#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS_futex_time64
+#elif defined(__NR_futex)
+// Some Android NDKs (Google NDK and older Crystax.NET NDK versions) don't define SYS_futex.
+#define BOOST_ATOMIC_DETAIL_SYS_FUTEX __NR_futex
+#elif defined(SYS___futex)
+// NetBSD defines SYS___futex, which has slightly different parameters. Basically, it has decoupled timeout and val2 parameters:
+// int __futex(int *addr1, int op, int val1, const struct timespec *timeout, int *addr2, int val2, int val3);
+// https://ftp.netbsd.org/pub/NetBSD/NetBSD-current/src/sys/sys/syscall.h
+// http://bxr.su/NetBSD/sys/kern/sys_futex.c
+#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS___futex
+#define BOOST_ATOMIC_DETAIL_NETBSD_FUTEX
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_SYS_FUTEX)
+
+#include <cstddef>
+#if defined(__linux__)
+#include <linux/futex.h>
+#else
+#error #include <sys/futex.h>
+#endif
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#define BOOST_ATOMIC_DETAIL_HAS_FUTEX
+
+#if defined(FUTEX_PRIVATE_FLAG)
+#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG FUTEX_PRIVATE_FLAG
+#elif defined(__ANDROID__)
+// On Android, futex.h is lacking many definitions, but the actual Linux kernel supports the API in full.
+#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG 128
+#else
+#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG 0
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Invokes an operation on the futex
+BOOST_FORCEINLINE int futex_invoke(void* addr1, int op, unsigned int val1, const void* timeout = NULL, void* addr2 = NULL, unsigned int val3 = 0) BOOST_NOEXCEPT
+{
+#if !defined(BOOST_ATOMIC_DETAIL_NETBSD_FUTEX)
+ return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, timeout, addr2, val3);
+#else
+ // Pass 0 in val2.
+ return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, timeout, addr2, 0u, val3);
+#endif
+}
+
+//! Invokes an operation on the futex
+BOOST_FORCEINLINE int futex_invoke(void* addr1, int op, unsigned int val1, unsigned int val2, void* addr2 = NULL, unsigned int val3 = 0) BOOST_NOEXCEPT
+{
+#if !defined(BOOST_ATOMIC_DETAIL_NETBSD_FUTEX)
+ return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, static_cast< atomics::detail::uintptr_t >(val2), addr2, val3);
+#else
+ // Pass NULL in timeout.
+ return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, static_cast< void* >(NULL), addr2, val2, val3);
+#endif
+}
+
+//! Checks that the value \c pval is \c expected and blocks
+BOOST_FORCEINLINE int futex_wait(void* pval, unsigned int expected) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval, FUTEX_WAIT, expected);
+}
+
+//! Checks that the value \c pval is \c expected and blocks
+BOOST_FORCEINLINE int futex_wait_private(void* pval, unsigned int expected) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval, FUTEX_WAIT | BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG, expected);
+}
+
+//! Wakes the specified number of threads waiting on the futex
+BOOST_FORCEINLINE int futex_signal(void* pval, unsigned int count = 1u) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval, FUTEX_WAKE, count);
+}
+
+//! Wakes the specified number of threads waiting on the futex
+BOOST_FORCEINLINE int futex_signal_private(void* pval, unsigned int count = 1u) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval, FUTEX_WAKE | BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG, count);
+}
+
+//! Wakes all threads waiting on the futex
+BOOST_FORCEINLINE int futex_broadcast(void* pval) BOOST_NOEXCEPT
+{
+ return futex_signal(pval, (~static_cast< unsigned int >(0u)) >> 1);
+}
+
+//! Wakes all threads waiting on the futex
+BOOST_FORCEINLINE int futex_broadcast_private(void* pval) BOOST_NOEXCEPT
+{
+ return futex_signal_private(pval, (~static_cast< unsigned int >(0u)) >> 1);
+}
+
+//! Wakes the wake_count threads waiting on the futex pval1 and requeues up to requeue_count of the blocked threads onto another futex pval2
+BOOST_FORCEINLINE int futex_requeue(void* pval1, void* pval2, unsigned int wake_count = 1u, unsigned int requeue_count = (~static_cast< unsigned int >(0u)) >> 1) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval1, FUTEX_REQUEUE, wake_count, requeue_count, pval2);
+}
+
+//! Wakes the wake_count threads waiting on the futex pval1 and requeues up to requeue_count of the blocked threads onto another futex pval2
+BOOST_FORCEINLINE int futex_requeue_private(void* pval1, void* pval2, unsigned int wake_count = 1u, unsigned int requeue_count = (~static_cast< unsigned int >(0u)) >> 1) BOOST_NOEXCEPT
+{
+ return futex_invoke(pval1, FUTEX_REQUEUE | BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG, wake_count, requeue_count, pval2);
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // defined(BOOST_ATOMIC_DETAIL_SYS_FUTEX)
+
+#endif // defined(__linux__) || defined(__OpenBSD__) || defined(__NETBSD__) || defined(__NetBSD__)
+
+#endif // BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
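
A hedged usage sketch for the wrappers above: one thread blocks on a 32-bit word until another thread flips it and issues a wake. Error handling and backoff are stripped to the essentials, and the GCC __atomic builtins stand in for whatever atomic accessors the caller actually uses:

    #include <boost/atomic/detail/futex.hpp>

    #if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX)
    namespace example {

    // Waiter: futex_wait_private only blocks if *flag still equals 0 at syscall time,
    // so the load-check-wait loop has no lost-wakeup window.
    inline void wait_until_set(unsigned int* flag)
    {
        while (__atomic_load_n(flag, __ATOMIC_ACQUIRE) == 0u)
            boost::atomics::detail::futex_wait_private(flag, 0u);
    }

    // Signaler: publish the flag, then wake one waiter.
    inline void set_and_wake(unsigned int* flag)
    {
        __atomic_store_n(flag, 1u, __ATOMIC_RELEASE);
        boost::atomics::detail::futex_signal_private(flag, 1u);
    }

    } // namespace example
    #endif
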
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_arm_asm_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_arm_asm_common.hpp
new file mode 100644
index 0000000000..f13d2bac8f
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_arm_asm_common.hpp
@@ -0,0 +1,79 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/gcc_arm_asm_common.hpp
+ *
+ * This header contains basic utilities for gcc asm-based ARM backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+//
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
+// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
+// So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+// A note about memory_order_consume. Technically, this architecture makes it possible
+// to avoid an unnecessary memory barrier after a consume load, since it supports data
+// dependency ordering. However, some compiler optimizations may break seemingly valid
+// code relying on data dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in user's code, which we have no
+// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n\t" "bx " #TMPREG "\n\t" ".arm\n\t" ".align 4\n\t" "8:\n\t"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n\t" "bx " #TMPREG "\n\t" ".thumb\n\t" ".align 2\n\t" "9:\n\t"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
+#else
+// Indicate that start/end macros are empty and the tmpreg is not needed
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%" BOOST_STRINGIZE(arg)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%H" BOOST_STRINGIZE(arg)
+#else
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%H" BOOST_STRINGIZE(arg)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%" BOOST_STRINGIZE(arg)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
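
A sketch of how the bracketing macros above are used in practice: in Thumb-1 builds they switch to ARM mode around the asm body and back, consuming a low scratch register; elsewhere they expand to nothing. This mirrors the CP15 barrier in fence_arch_ops_gcc_arm.hpp (the function name is illustrative):

    #include <boost/cstdint.hpp>
    #include <boost/atomic/detail/gcc_arm_asm_common.hpp>

    inline void arm_mode_bracket_sketch() noexcept
    {
        boost::uint32_t tmp;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
            "mcr p15, 0, r0, c7, c10, 5\n\t" // pre-v7 data memory barrier via CP15
            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
            : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp)
            :
            : "memory"
        );
    }
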
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_atomic_memory_order_utils.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_atomic_memory_order_utils.hpp
new file mode 100644
index 0000000000..42f5212ce2
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_atomic_memory_order_utils.hpp
@@ -0,0 +1,66 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/gcc_atomic_memory_order_utils.hpp
+ *
+ * This header contains utilities for working with gcc atomic memory order constants.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*!
+ * The function converts \c boost::memory_order values to the compiler-specific constants.
+ *
+ * NOTE: The intention is that the function is optimized away by the compiler, and the
+ * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
+ * work in this case because the standard atomics interface requires memory ordering
+ * constants to be passed as function arguments, at which point they stop being constexpr.
+ * However, it is crucial that the compiler sees constants and not runtime values,
+ * because otherwise it just ignores the ordering value and always uses seq_cst.
+ * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
+ * gcc 4.8.2. Intel Compiler issues a warning in this case:
+ *
+ * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
+ *
+ * while gcc acts silently.
+ *
+ * To mitigate the problem ALL functions, including the atomic<> members, must be
+ * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
+ * all functions are called with constant orderings and call the intrinsics properly.
+ *
+ * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
+ * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
+ * all atomic operations will be executed with seq_cst semantics.
+ */
+BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
+{
+ return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
+ (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
+ (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
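
A usage sketch for convert_memory_order_to_gcc: provided the whole call chain is force-inlined, the compiler sees a compile-time __ATOMIC_* constant at the intrinsic call site, which is exactly the property the note above insists on (the wrapper below is hypothetical, not Boost's own):

    #include <boost/memory_order.hpp>
    #include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>

    // Sketch: forwards a boost::memory_order to a gcc __atomic builtin.
    BOOST_FORCEINLINE unsigned int load_u32_sketch(const unsigned int* p, boost::memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_load_n(p, boost::atomics::detail::convert_memory_order_to_gcc(order));
    }
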
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_ppc_asm_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_ppc_asm_common.hpp
new file mode 100644
index 0000000000..9eb119e33b
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/gcc_ppc_asm_common.hpp
@@ -0,0 +1,33 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/gcc_ppc_asm_common.hpp
+ *
+ * This header contains basic utilities for gcc asm-based PowerPC backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !defined(_AIX)
+#define BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL(label) label ":\n\t"
+#define BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP(insn, label, offset) insn " " label "\n\t"
+#else
+// The standard assembler tool (as) on AIX does not support numeric jump labels, so we have to use offsets instead.
+// https://github.com/boostorg/atomic/pull/50
+#define BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL(label)
+#define BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP(insn, label, offset) insn " $" offset "\n\t"
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
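
A sketch of the two macros above working together in an LL/SC retry loop. On most targets a numeric label and a symbolic backward jump are emitted; on AIX the label disappears and the jump becomes a byte offset, here -12 because the branch sits three 4-byte instructions after the lwarx (the loop shape is illustrative, modeled on the PPC core ops, not copied from them):

    #include <boost/atomic/detail/gcc_ppc_asm_common.hpp>

    inline void atomic_increment_sketch(unsigned int* p) noexcept
    {
        unsigned int tmp;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,0,%1\n\t"                                   // load-reserve
            "addi %0,%0,1\n\t"                                    // increment
            "stwcx. %0,0,%1\n\t"                                  // store-conditional
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12") // retry if reservation was lost
            : "=&b" (tmp)
            : "b" (p)
            : "cr0", "memory"
        );
    }
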
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/header.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/header.hpp
new file mode 100644
index 0000000000..0251c61654
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/header.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright Andrey Semashev 2020.
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#include <boost/config.hpp>
+
+#if !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
+
+#if defined(BOOST_MSVC)
+
+#pragma warning(push, 3)
+// 'm_A' : class 'A' needs to have dll-interface to be used by clients of class 'B'
+#pragma warning(disable: 4251)
+// non dll-interface class 'A' used as base for dll-interface class 'B'
+#pragma warning(disable: 4275)
+// 'this' : used in base member initializer list
+#pragma warning(disable: 4355)
+// 'int' : forcing value to bool 'true' or 'false' (performance warning)
+#pragma warning(disable: 4800)
+// unreferenced formal parameter
+#pragma warning(disable: 4100)
+// conditional expression is constant
+#pragma warning(disable: 4127)
+// default constructor could not be generated
+#pragma warning(disable: 4510)
+// copy constructor could not be generated
+#pragma warning(disable: 4511)
+// assignment operator could not be generated
+#pragma warning(disable: 4512)
+// function marked as __forceinline not inlined
+#pragma warning(disable: 4714)
+// decorated name length exceeded, name was truncated
+#pragma warning(disable: 4503)
+// declaration of 'A' hides previous local declaration
+#pragma warning(disable: 4456)
+// declaration of 'A' hides global declaration
+#pragma warning(disable: 4459)
+// 'X': This function or variable may be unsafe. Consider using Y instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
+#pragma warning(disable: 4996)
+// 'A' : multiple assignment operators specified
+#pragma warning(disable: 4522)
+// unary minus operator applied to unsigned type, result still unsigned
+#pragma warning(disable: 4146)
+// frame pointer register 'ebx' modified by inline assembly code
+#pragma warning(disable: 4731)
+// alignment is sensitive to packing
+#pragma warning(disable: 4121)
+// 'struct_name' : structure was padded due to __declspec(align())
+#pragma warning(disable: 4324)
+
+#elif defined(BOOST_GCC) && BOOST_GCC >= 40600
+
+#pragma GCC diagnostic push
+// unused parameter 'arg'
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// missing initializer for member var
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+
+#elif defined(BOOST_CLANG)
+
+#pragma clang diagnostic push
+// unused parameter 'arg'
+#pragma clang diagnostic ignored "-Wunused-parameter"
+// missing initializer for member var
+#pragma clang diagnostic ignored "-Wmissing-field-initializers"
+
+#endif
+
+#endif // !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
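
header.hpp and footer.hpp form a strict push/pop pair: every detail header includes header.hpp after its other includes and footer.hpp at the very end, so the warning state is restored before user code resumes. The expected layout, schematically (a pattern visible throughout this patch, not a real file):

    // some_detail_header.hpp -- schematic layout only
    #include <boost/atomic/detail/config.hpp>
    #include <boost/atomic/detail/header.hpp>   // pushes and silences warnings

    namespace boost { namespace atomics { namespace detail {
    // ... declarations ...
    }}} // namespaces

    #include <boost/atomic/detail/footer.hpp>   // pops the warning state
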
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/int_sizes.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/int_sizes.hpp
index 2a9757c147..be250df72a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/int_sizes.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/int_sizes.hpp
@@ -47,12 +47,19 @@
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
- !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER)
// Try to deduce sizes from limits
#include <limits.h>
+#if defined(__has_include)
+#if __has_include(<stdint.h>)
+#include <stdint.h>
+#endif
+#endif
#include <boost/cstdint.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT)
#if (USHRT_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1
#elif (USHRT_MAX + 0) == 0xffff
@@ -62,7 +69,9 @@
#elif (USHRT_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8
#endif
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT)
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT)
#if (UINT_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1
#elif (UINT_MAX + 0) == 0xffff
@@ -72,7 +81,9 @@
#elif (UINT_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8
#endif
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT)
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
#if (ULONG_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1
#elif (ULONG_MAX + 0) == 0xffff
@@ -82,7 +93,9 @@
#elif (ULONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8
#endif
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
#else
@@ -109,6 +122,17 @@
#endif
#endif // defined(__hpux)
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && defined(UINTPTR_MAX)
+#if (UINTPTR_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 2
+#elif (UINTPTR_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
+#elif (UINTPTR_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
+#endif
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && defined(UINTPTR_MAX)
#endif
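
The new pointer-size deduction can be sanity-checked at compile time; note that the limits-based fallback above only defines BOOST_ATOMIC_DETAIL_SIZEOF_POINTER when <stdint.h> provides UINTPTR_MAX, hence the guard (a sketch, not part of the library):

    #include <boost/static_assert.hpp>
    #include <boost/atomic/detail/int_sizes.hpp>

    #if defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER)
    BOOST_STATIC_ASSERT_MSG(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == sizeof(void*),
        "deduced pointer size must match the real pointer size");
    #endif
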
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_extend.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_conversions.hpp
index dea48ac6fe..c61a55ad5b 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_extend.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/integral_conversions.hpp
@@ -6,13 +6,13 @@
* Copyright (c) 2018 Andrey Semashev
*/
/*!
- * \file atomic/detail/integral_extend.hpp
+ * \file atomic/detail/integral_conversions.hpp
*
- * This header defines sign/zero extension utilities for Boost.Atomic. The tools assume two's complement signed integer representation.
+ * This header defines sign/zero extension and truncation utilities for Boost.Atomic. The tools assume two's complement signed integer representation.
*/
-#ifndef BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
@@ -20,6 +20,7 @@
#include <boost/atomic/detail/type_traits/is_signed.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
#include <boost/atomic/detail/type_traits/make_unsigned.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -102,4 +103,6 @@ BOOST_FORCEINLINE Output integral_extend(Input input) BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp
index 774354fb7f..fff93e783b 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp
@@ -18,23 +18,31 @@
#if _WIN32_WCE >= 0x600
-extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
-extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
-extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+extern "C" long __cdecl _InterlockedCompareExchange(long volatile*, long, long);
+extern "C" long __cdecl _InterlockedExchangeAdd(long volatile*, long);
+extern "C" long __cdecl _InterlockedExchange(long volatile*, long);
+extern "C" long __cdecl _InterlockedIncrement(long volatile*);
+extern "C" long __cdecl _InterlockedDecrement(long volatile*);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
#else // _WIN32_WCE >= 0x600
-extern "C" long __cdecl InterlockedCompareExchange( long*, long, long );
-extern "C" long __cdecl InterlockedExchangeAdd( long*, long );
-extern "C" long __cdecl InterlockedExchange( long*, long );
+extern "C" long __cdecl InterlockedCompareExchange(long*, long, long);
+extern "C" long __cdecl InterlockedExchangeAdd(long*, long);
+extern "C" long __cdecl InterlockedExchange(long*, long);
+extern "C" long __cdecl InterlockedIncrement(long*);
+extern "C" long __cdecl InterlockedDecrement(long*);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), exchange, compare)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) InterlockedDecrement((long*)(dest))
#endif // _WIN32_WCE >= 0x600
@@ -45,19 +53,25 @@ extern "C" long __cdecl InterlockedExchange( long*, long );
#if _MSC_VER < 1400
-extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
-extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
-extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+extern "C" long __cdecl _InterlockedCompareExchange(long volatile*, long, long);
+extern "C" long __cdecl _InterlockedExchangeAdd(long volatile*, long);
+extern "C" long __cdecl _InterlockedExchange(long volatile*, long);
+extern "C" long __cdecl _InterlockedIncrement(long volatile*);
+extern "C" long __cdecl _InterlockedDecrement(long volatile*);
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedDecrement)
#endif
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
@@ -70,6 +84,8 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedDecrement)
#pragma intrinsic(_InterlockedAnd)
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedXor)
@@ -80,6 +96,8 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
@@ -441,6 +459,8 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) InterlockedDecrement((long*)(dest))
#if defined(_WIN64)
@@ -477,10 +497,14 @@ extern "C" {
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedIncrement(long volatile*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedDecrement(long volatile*);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) boost::atomics::detail::InterlockedIncrement((long*)(dest))
+#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) boost::atomics::detail::InterlockedDecrement((long*)(dest))
#if defined(_WIN64)
@@ -488,8 +512,8 @@ BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(_
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
-BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
-BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile*, void*, void*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile*, void*);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
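The new BOOST_ATOMIC_INTERLOCKED_INCREMENT / BOOST_ATOMIC_INTERLOCKED_DECREMENT macros wrap the one-argument Win32 intrinsics, which return the updated value rather than the previous one. A minimal sketch of how they could back a reference counter; the wrapper type below is hypothetical, not part of the patch:

    #include <boost/atomic/detail/interlocked.hpp>

    // Hypothetical illustration only; not a library type.
    class ref_count
    {
    public:
        long add_ref()  // returns the new count
        {
            return BOOST_ATOMIC_INTERLOCKED_INCREMENT(&m_value);
        }
        long release()  // returns the new count; 0 means the last reference is gone
        {
            return BOOST_ATOMIC_INTERLOCKED_DECREMENT(&m_value);
        }
    private:
        long m_value = 0;
    };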
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/intptr.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/intptr.hpp
new file mode 100644
index 0000000000..e8e30c7f32
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/intptr.hpp
@@ -0,0 +1,46 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/intptr.hpp
+ *
+ * This header defines (u)intptr_t types.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#if !defined(BOOST_HAS_INTPTR_T)
+#include <cstddef>
+#endif
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_HAS_INTPTR_T)
+using boost::uintptr_t;
+using boost::intptr_t;
+#else
+typedef std::size_t uintptr_t;
+typedef std::ptrdiff_t intptr_t;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_
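The header gives the rest of the library a pointer-sized integer even on configurations where boost/cstdint.hpp does not provide (u)intptr_t, falling back to std::size_t/std::ptrdiff_t. A one-line sketch of the intended use, as in the lock pool hashing introduced below:

    #include <boost/atomic/detail/intptr.hpp>

    // Pointer bits as an unsigned integer, e.g. for hashing an address.
    inline boost::atomics::detail::uintptr_t address_bits(const volatile void* p)
    {
        return (boost::atomics::detail::uintptr_t)p;
    }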
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/lock_pool.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/lock_pool.hpp
new file mode 100644
index 0000000000..64b66a29bb
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/lock_pool.hpp
@@ -0,0 +1,151 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013-2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/lock_pool.hpp
+ *
+ * This header contains declaration of the lock pool used to emulate atomic ops.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/link.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#if defined(BOOST_WINDOWS)
+#include <boost/winapi/thread.hpp>
+#elif defined(BOOST_HAS_NANOSLEEP)
+#include <time.h>
+#else
+#include <unistd.h>
+#endif
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+BOOST_FORCEINLINE void wait_some() BOOST_NOEXCEPT
+{
+#if defined(BOOST_WINDOWS)
+ boost::winapi::SwitchToThread();
+#elif defined(BOOST_HAS_NANOSLEEP)
+ // Do not use sched_yield or pthread_yield as at least on Linux it doesn't block the thread if there are no other
+ // pending threads on the current CPU. Proper sleeping is guaranteed to block the thread, which allows other threads
+ // to potentially migrate to this CPU and complete the tasks we're waiting for.
+ struct ::timespec ts = {};
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000;
+ ::nanosleep(&ts, NULL);
+#else
+ ::usleep(1);
+#endif
+}
+
+namespace lock_pool {
+
+BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void unlock(void* ls) BOOST_NOEXCEPT;
+
+BOOST_ATOMIC_DECL void* allocate_wait_state(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void free_wait_state(void* ls, void* ws) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void wait(void* ls, void* ws) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void notify_one(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void notify_all(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
+
+BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
+BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
+
+template< std::size_t Alignment >
+BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) BOOST_NOEXCEPT
+{
+ atomics::detail::uintptr_t ptr = (atomics::detail::uintptr_t)addr;
+ atomics::detail::uintptr_t h = ptr / Alignment;
+
+ // Since many malloc/new implementations return pointers with higher alignment
+ // than indicated by Alignment, it makes sense to mix higher bits
+ // into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
+ // on 32-bit - to 8 bytes.
+ BOOST_CONSTEXPR_OR_CONST std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
+ BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
+ h ^= ptr / malloc_alignment;
+
+ return h;
+}
+
+template< std::size_t Alignment, bool LongLock = false >
+class scoped_lock
+{
+private:
+ void* m_lock;
+
+public:
+ explicit scoped_lock(const volatile void* addr) BOOST_NOEXCEPT
+ {
+ atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
+ BOOST_IF_CONSTEXPR (!LongLock)
+ m_lock = lock_pool::short_lock(h);
+ else
+ m_lock = lock_pool::long_lock(h);
+ }
+ ~scoped_lock() BOOST_NOEXCEPT
+ {
+ lock_pool::unlock(m_lock);
+ }
+
+ void* get_lock_state() const BOOST_NOEXCEPT
+ {
+ return m_lock;
+ }
+
+ BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
+ BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
+};
+
+template< std::size_t Alignment >
+class scoped_wait_state :
+ public scoped_lock< Alignment, true >
+{
+private:
+ void* m_wait_state;
+
+public:
+ explicit scoped_wait_state(const volatile void* addr) BOOST_NOEXCEPT :
+ scoped_lock< Alignment, true >(addr)
+ {
+ m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
+ }
+ ~scoped_wait_state() BOOST_NOEXCEPT
+ {
+ lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
+ }
+
+ void wait() BOOST_NOEXCEPT
+ {
+ lock_pool::wait(this->get_lock_state(), m_wait_state);
+ }
+
+ BOOST_DELETED_FUNCTION(scoped_wait_state(scoped_wait_state const&))
+ BOOST_DELETED_FUNCTION(scoped_wait_state& operator=(scoped_wait_state const&))
+};
+
+} // namespace lock_pool
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
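The new lock pool API splits locking (short_lock/long_lock keyed by a hashed address) from waiting (wait states plus notify_one/notify_all), which is what backs the emulated atomic wait operations. A hedged sketch of a wait loop on top of scoped_wait_state, modeled on how the atomic implementation is expected to use it rather than copied from it:

    // Sketch: wait until a lock-pool-protected 4-byte value changes.
    // All mutators of this address must go through the same pool lock.
    using namespace boost::atomics::detail;

    unsigned int wait_until_changed(const volatile unsigned int& storage, unsigned int old_val)
    {
        lock_pool::scoped_wait_state< 4u > wait_state(&storage);
        unsigned int new_val = const_cast< const unsigned int& >(storage); // read under the lock
        while (new_val == old_val)
        {
            wait_state.wait(); // blocks until notify_one/notify_all on this address
            new_val = const_cast< const unsigned int& >(storage);
        }
        return new_val;
    }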
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/lockpool.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/lockpool.hpp
deleted file mode 100644
index 4e249aa048..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/lockpool.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2011 Helge Bahmann
- * Copyright (c) 2013-2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/lockpool.hpp
- *
- * This header contains declaration of the lockpool used to emulate atomic ops.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
-
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/link.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-struct lockpool
-{
- class scoped_lock
- {
- void* m_lock;
-
- public:
- explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr) BOOST_NOEXCEPT;
- BOOST_ATOMIC_DECL ~scoped_lock() BOOST_NOEXCEPT;
-
- BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
- BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
- };
-
- static BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
- static BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
-};
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/memory_order_utils.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/memory_order_utils.hpp
new file mode 100644
index 0000000000..6f6ab7555d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/memory_order_utils.hpp
@@ -0,0 +1,47 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/memory_order_utils.hpp
+ *
+ * This header contains utilities related to memory order constants.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT
+{
+ return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order);
+}
+
+BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+{
+ // 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
+ // Given the enum values we can test the strength of memory order requirements with this single condition.
+ return (static_cast< unsigned int >(failure_order) & 15u) <= (static_cast< unsigned int >(success_order) & 15u);
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
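deduce_failure_order implements the usual derivation of a compare-exchange failure order from the success order, and the second helper validates a user-supplied pair using the bit encoding of boost::memory_order. A small worked example, assuming a C++11 compiler where BOOST_CONSTEXPR expands to constexpr (enum values per boost/memory_order.hpp: relaxed=0, consume=1, acquire=2, release=4, acq_rel=6, seq_cst=14):

    #include <boost/memory_order.hpp>
    #include <boost/atomic/detail/memory_order_utils.hpp>

    using namespace boost;
    using namespace boost::atomics::detail;

    // acq_rel has no meaning for a failed CAS (nothing is written), so it
    // degrades to acquire; release likewise degrades to relaxed.
    static_assert(deduce_failure_order(memory_order_acq_rel) == memory_order_acquire, "");
    static_assert(deduce_failure_order(memory_order_release) == memory_order_relaxed, "");

    // seq_cst & 15 == 14 is not <= acquire & 15 == 2: a seq_cst failure
    // order with an acquire success order is rejected as too strong.
    static_assert(!cas_failure_order_must_not_be_stronger_than_success_order(
        memory_order_acquire, memory_order_seq_cst), "");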
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/once_flag.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/once_flag.hpp
new file mode 100644
index 0000000000..45bfd2f81e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/once_flag.hpp
@@ -0,0 +1,43 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/once_flag.hpp
+ *
+ * This header declares the \c once_flag structure for controlling one-time initialization.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/aligned_variable.hpp>
+#include <boost/atomic/detail/core_operations.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+typedef atomics::detail::core_operations< 1u, false, false > once_flag_operations;
+
+struct once_flag
+{
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR(once_flag_operations::storage_alignment, once_flag_operations::storage_type, m_flag);
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
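once_flag is deliberately a trivial aggregate: m_flag is raw storage driven through once_flag_operations (1-byte, unsigned, non-interprocess core operations). A hedged sketch of one-time initialization over it; the helper below is hypothetical, since the library drives the flag from its own internals rather than a public API:

    #include <boost/memory_order.hpp>
    #include <boost/atomic/detail/once_flag.hpp>

    namespace ad = boost::atomics::detail;

    // Hypothetical helper, illustration only. The first caller runs f(); a
    // real implementation must also make later callers wait for f() to finish.
    template< typename F >
    void run_once(ad::once_flag& flag, F f)
    {
        typedef ad::once_flag_operations ops;
        ops::storage_type expected = 0u;
        if (ops::compare_exchange_strong(flag.m_flag, expected, 1u,
                boost::memory_order_acq_rel, boost::memory_order_acquire))
        {
            f();
        }
    }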
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations.hpp
deleted file mode 100644
index d81399a8e3..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/operations.hpp
- *
- * This header defines atomic operations, including the emulated version.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
-
-#include <boost/atomic/detail/operations_lockfree.hpp>
-#include <boost/atomic/detail/ops_emulated.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_fwd.hpp
deleted file mode 100644
index efd4970747..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_fwd.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/operations_fwd.hpp
- *
- * This header contains forward declaration of the \c operations template.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
-
-#include <cstddef>
-#include <boost/atomic/detail/config.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-template< std::size_t Size, bool Signed >
-struct operations;
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_lockfree.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_lockfree.hpp
deleted file mode 100644
index 62b45836b5..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/operations_lockfree.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/operations_lockfree.hpp
- *
- * This header defines lockfree atomic operations.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
-
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/platform.hpp>
-
-#if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/ops_)
-#else
-#include <boost/atomic/detail/operations_fwd.hpp>
-#endif
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_emulated.hpp
deleted file mode 100644
index f30fbdab9f..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_emulated.hpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/ops_emulated.hpp
- *
- * This header contains lockpool-based implementation of the \c operations template.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
-
-#include <cstddef>
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/detail/lockpool.hpp>
-#include <boost/atomic/capabilities.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-template< std::size_t Size, bool Signed >
-struct emulated_operations
-{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
- static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
-
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = false;
-
- static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- lockpool::scoped_lock lock(&storage);
- const_cast< storage_type& >(storage) = v;
- }
-
- static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
- {
- lockpool::scoped_lock lock(&storage);
- return const_cast< storage_type const& >(storage);
- }
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s += v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s -= v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s = v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- const bool res = old_val == expected;
- if (res)
- s = desired;
- expected = old_val;
-
- return res;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_weak(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- // Note: This function is an exact copy of compare_exchange_strong. The reason we're not just forwarding the call
- // is that MSVC-12 ICEs in this case.
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- const bool res = old_val == expected;
- if (res)
- s = desired;
- expected = old_val;
-
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s &= v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s |= v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type& s = const_cast< storage_type& >(storage);
- lockpool::scoped_lock lock(&storage);
- storage_type old_val = s;
- s ^= v;
- return old_val;
- }
-
- static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- return !!exchange(storage, (storage_type)1, order);
- }
-
- static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- store(storage, (storage_type)0, order);
- }
-};
-
-template< std::size_t Size, bool Signed >
-struct operations :
- public emulated_operations< Size, Signed >
-{
-};
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch32_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch32_common.hpp
new file mode 100644
index 0000000000..c963931524
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch32_common.hpp
@@ -0,0 +1,53 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_aarch32_common.hpp
+ *
+ * This header contains basic utilities for the gcc AArch32 backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(mo)\
+ switch (mo)\
+ {\
+ case memory_order_relaxed:\
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("r", "r")\
+ break;\
+ \
+ case memory_order_consume:\
+ case memory_order_acquire:\
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("a", "r")\
+ break;\
+ \
+ case memory_order_release:\
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("r", "l")\
+ break;\
+ \
+ default:\
+ BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("a", "l")\
+ break;\
+ }
+
+#if defined(BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN)
+#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(arg) "%" BOOST_STRINGIZE(arg)
+#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(arg) "%H" BOOST_STRINGIZE(arg)
+#else
+#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(arg) "%H" BOOST_STRINGIZE(arg)
+#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(arg) "%" BOOST_STRINGIZE(arg)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
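The MO_SWITCH macro converts a runtime memory_order into compile-time instruction variants: the two strings it forwards are spliced into the exclusive load/store mnemonics, choosing ldrex vs. ldaex and strex vs. stlex. A sketch of how an operation might instantiate it, modeled on the AArch32 backend rather than copied from it (the AArch64 header below follows the same scheme with ldxr/ldaxr and stxr/stlxr):

    // Sketch: byte exchange with a runtime order. "ld" ld "exb" composes
    // ldrexb or ldaexb; "st" st "exb" composes strexb or stlexb.
    unsigned char exchange_u8(unsigned char volatile& storage, unsigned char v, boost::memory_order order)
    {
        using namespace boost;
        unsigned char original;
        uint32_t tmp;
    #define BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN(ld, st)\
        __asm__ __volatile__\
        (\
            "1:\n\t"\
            "ld" ld "exb %[original], %[storage]\n\t"\
            "st" st "exb %[tmp], %[value], %[storage]\n\t"\
            "teq %[tmp], #0\n\t"\
            "bne 1b\n\t"\
            : [original] "=&r" (original), [tmp] "=&r" (tmp), [storage] "+Q" (storage)\
            : [value] "r" (v)\
            : "memory"\
        );

        BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(order)
    #undef BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN
        return original;
    }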
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch64_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch64_common.hpp
new file mode 100644
index 0000000000..80c26af78d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_aarch64_common.hpp
@@ -0,0 +1,53 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_aarch64_common.hpp
+ *
+ * This header contains basic utilities for the gcc AArch64 backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(mo)\
+ switch (mo)\
+ {\
+ case memory_order_relaxed:\
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("", "")\
+ break;\
+ \
+ case memory_order_consume:\
+ case memory_order_acquire:\
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("a", "")\
+ break;\
+ \
+ case memory_order_release:\
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("", "l")\
+ break;\
+ \
+ default:\
+ BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("a", "l")\
+ break;\
+ }
+
+#if defined(BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN)
+#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "0"
+#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "1"
+#else
+#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "1"
+#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "0"
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp
index 73c04ffe15..08046a945a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp
@@ -19,6 +19,8 @@
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/fence_arch_operations.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -28,51 +30,7 @@ namespace boost {
namespace atomics {
namespace detail {
-// A memory barrier is effected using a "co-processor 15" instruction,
-// though a separate assembler mnemonic is available for it in v7.
-//
-// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
-// doesn't include all instructions and in particular it doesn't include the co-processor
-// instruction used for the memory barrier or the load-locked/store-conditional
-// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
-// asm blocks with code to temporarily change to ARM mode.
-//
-// You can only change between ARM and Thumb modes when branching using the bx instruction.
-// bx takes an address specified in a register. The least significant bit of the address
-// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
-// A temporary register is needed for the address and is passed as an argument to these
-// macros. It must be one of the "low" registers accessible to Thumb code, specified
-// using the "l" attribute in the asm statement.
-//
-// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
-// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
-// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
-// So in v7 we don't need to change to ARM mode; we can write "universal
-// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
-// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
-// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
-// so they can always be present.
-
-// A note about memory_order_consume. Technically, this architecture allows avoiding an
-// unnecessary memory barrier after a consume load since it supports data dependency ordering.
-// However, some compiler optimizations may break seemingly valid code relying on data
-// dependency tracking by injecting bogus branches to aid out-of-order execution.
-// This may happen not only in Boost.Atomic code but also in user's code, which we have no
-// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
-// For this reason we promote memory_order_consume to memory_order_acquire.
-
-#if defined(__thumb__) && !defined(__thumb2__)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
-#else
-// The tmpreg may be wasted in this case, which is non-optimal.
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
-#endif
-
-struct gcc_arm_operations_base
+struct core_arch_operations_gcc_arm_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -80,50 +38,19 @@ struct gcc_arm_operations_base
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
- hardware_full_fence();
+ fence_arch_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
- hardware_full_fence();
+ fence_arch_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
- hardware_full_fence();
- }
-
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
-#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
- // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
- // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
- // Since we cannot detect binutils version at compile time, we'll have to always use this hack.
- __asm__ __volatile__
- (
-#if defined(__thumb2__)
- ".short 0xF3BF, 0x8F5B\n" // dmb ish
-#else
- ".word 0xF57FF05B\n" // dmb ish
-#endif
- :
- :
- : "memory"
- );
-#else
- uint32_t tmp;
- __asm__ __volatile__
- (
- BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "mcr\tp15, 0, r0, c7, c10, 5\n"
- BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
- : "=&l" (tmp)
- :
- : "memory"
- );
-#endif
+ fence_arch_operations::hardware_full_fence();
}
};
@@ -131,4 +58,6 @@ struct gcc_arm_operations_base
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
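With the barrier code moved out to fence_arch_operations, the renamed base class is only about placing fences around an otherwise plain access. A sketch of the pattern a derived backend follows, modeled on the usual structure rather than copied from a specific header:

    // Sketch: how loads/stores of a lock-free-sized value are bracketed.
    struct example_ops : boost::atomics::detail::core_arch_operations_gcc_arm_base
    {
        static unsigned int load(unsigned int const volatile& storage, boost::memory_order order)
        {
            unsigned int v = storage; // plain load
            fence_after(order);       // dmb when the order includes acquire/consume
            return v;
        }

        static void store(unsigned int volatile& storage, unsigned int v, boost::memory_order order)
        {
            fence_before(order);      // dmb when the order includes release
            storage = v;              // plain store
            fence_after_store(order); // trailing dmb only for seq_cst
        }
    };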
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp
deleted file mode 100644
index ce40e3b2b9..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/ops_gcc_atomic.hpp
- *
- * This header contains implementation of the \c operations template.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
-
-#include <cstddef>
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
-#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
-#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
-#include <boost/atomic/detail/ops_cas_based.hpp>
-#endif
-
-#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
- __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
- __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
- __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
-// There are platforms where we need to use larger storage types
-#include <boost/atomic/detail/int_sizes.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
-#endif
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-#if defined(__INTEL_COMPILER)
-// This is used to suppress warning #32013 described below for Intel Compiler.
-// In debug builds the compiler does not inline any functions, so basically
-// every atomic function call results in this warning. I don't know any other
-// way to selectively disable just this one warning.
-#pragma system_header
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-/*!
- * The function converts \c boost::memory_order values to the compiler-specific constants.
- *
- * NOTE: The intention is that the function is optimized away by the compiler, and the
- * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
- * work in this case because the standard atomics interface requires memory ordering
- * constants to be passed as function arguments, at which point they stop being constexpr.
- * However, it is crucial that the compiler sees constants and not runtime values,
- * because otherwise it just ignores the ordering value and always uses seq_cst.
- * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
- * gcc 4.8.2. Intel Compiler issues a warning in this case:
- *
- * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
- *
- * while gcc acts silently.
- *
- * To mitigate the problem ALL functions, including the atomic<> members, must be
- * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
- * all functions are called with constant orderings and call intrinsics properly.
- *
- * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
- * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
- * all atomic operations will be executed with seq_cst semantics.
- */
-BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
-{
- return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
- (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
- (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
-}
-
-template< std::size_t Size, bool Signed >
-struct gcc_atomic_operations
-{
- typedef typename make_storage_type< Size >::type storage_type;
- typedef typename make_storage_type< Size >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
- static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
-
- // Note: In the current implementation, gcc_atomic_operations are used only when the particularly sized __atomic
- // intrinsics are always lock-free (i.e. the corresponding LOCK_FREE macro is 2). Therefore it is safe to
- // always set is_always_lock_free to true here.
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
-
- static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
- {
- return __atomic_compare_exchange_n
- (
- &storage, &expected, desired, false,
- atomics::detail::convert_memory_order_to_gcc(success_order),
- atomics::detail::convert_memory_order_to_gcc(failure_order)
- );
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_weak(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
- {
- return __atomic_compare_exchange_n
- (
- &storage, &expected, desired, true,
- atomics::detail::convert_memory_order_to_gcc(success_order),
- atomics::detail::convert_memory_order_to_gcc(failure_order)
- );
- }
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
- }
-
- static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
- }
-};
-
-#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
-#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-
-// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
-// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
-// A similar problem exists with gcc 7 as well, as it requires to link with libatomic to use 16-byte intrinsics:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
-template< bool Signed >
-struct operations< 16u, Signed > :
- public cas_based_operations< gcc_dcas_x86_64< Signed > >
-{
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-};
-
-#else
-
-template< bool Signed >
-struct operations< 16u, Signed > :
- public gcc_atomic_operations< 16u, Signed >
-{
-};
-
-#endif
-#endif
-
-
-#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
-#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
-
-// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
-template< bool Signed >
-struct operations< 8u, Signed > :
- public cas_based_operations< gcc_dcas_x86< Signed > >
-{
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-};
-
-#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
-
-#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED
-
-template< bool Signed >
-struct operations< 8u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
-{
-};
-
-#else
-
-template< bool Signed >
-struct operations< 8u, Signed > :
- public gcc_atomic_operations< 8u, Signed >
-{
-};
-
-#endif
-#endif
-
-#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
-#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
-
-#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED
-
-#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
-
-template< bool Signed >
-struct operations< 4u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
-{
-};
-
-#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
-
-template< bool Signed >
-struct operations< 4u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
-{
-};
-
-#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
-
-#else
-
-template< bool Signed >
-struct operations< 4u, Signed > :
- public gcc_atomic_operations< 4u, Signed >
-{
-};
-
-#endif
-#endif
-
-#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
-#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
-
-#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED
-
-#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
-
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
-{
-};
-
-#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
-
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
-{
-};
-
-#else
-
-template< bool Signed >
-struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
-{
-};
-
-#endif
-
-#else
-
-template< bool Signed >
-struct operations< 2u, Signed > :
- public gcc_atomic_operations< 2u, Signed >
-{
-};
-
-#endif
-#endif
-
-#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
-#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
- (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
- (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
- (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)
-
-#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
-{
-};
-
-#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
-{
-};
-
-#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
-{
-};
-
-#else
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
-{
-};
-
-#endif
-
-#else
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public gcc_atomic_operations< 1u, Signed >
-{
-};
-
-#endif
-#endif
-
-#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
-#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
-#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
-}
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
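This deleted header is superseded by the new core operations headers in 1.79; its essential trick, as the long comment above explains, is keeping the memory order a visible compile-time constant all the way down to the __atomic builtins. A minimal sketch of the underlying GCC intrinsics with literal orders:

    // Sketch of the GCC builtins the deleted header wrapped; the orders are
    // literal constants, which is what the force-inlining protects.
    unsigned int fetch_add_relaxed(unsigned int volatile& storage, unsigned int v)
    {
        return __atomic_fetch_add(&storage, v, __ATOMIC_RELAXED);
    }

    bool cas_acq_rel(unsigned int volatile& storage, unsigned int& expected, unsigned int desired)
    {
        // weak=false requests a strong CAS; the failure order must not be
        // stronger than the success order.
        return __atomic_compare_exchange_n(&storage, &expected, desired, false,
            __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    }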
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp
index e5c9303bf7..0bf4553405 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp
@@ -18,6 +18,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -38,7 +39,7 @@ namespace detail {
// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.
-struct gcc_ppc_operations_base
+struct core_arch_operations_gcc_ppc_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -67,4 +68,6 @@ struct gcc_ppc_operations_base
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp
deleted file mode 100644
index 007d4eeeeb..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/ops_gcc_x86.hpp
- *
- * This header contains implementation of the \c operations template.
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
-
-#include <cstddef>
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/storage_type.hpp>
-#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/capabilities.hpp>
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
-#include <boost/atomic/detail/ops_cas_based.hpp>
-#endif
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-struct gcc_x86_operations_base
-{
- static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
-
- static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
- {
- if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
- __asm__ __volatile__ ("" ::: "memory");
- }
-
- static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
- {
- if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
- __asm__ __volatile__ ("" ::: "memory");
- }
-};
-
-template< std::size_t Size, bool Signed, typename Derived >
-struct gcc_x86_operations :
- public gcc_x86_operations_base
-{
- typedef typename make_storage_type< Size >::type storage_type;
-
- static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- if (order != memory_order_seq_cst)
- {
- fence_before(order);
- storage = v;
- fence_after(order);
- }
- else
- {
- Derived::exchange(storage, v, order);
- }
- }
-
- static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- storage_type v = storage;
- fence_after(order);
- return v;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- return Derived::fetch_add(storage, -v, order);
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_weak(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
- {
- return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
- }
-
- static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- return !!Derived::exchange(storage, (storage_type)1, order);
- }
-
- static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- store(storage, (storage_type)0, order);
- }
-};
-
-template< bool Signed >
-struct operations< 1u, Signed > :
- public gcc_x86_operations< 1u, Signed, operations< 1u, Signed > >
-{
- typedef gcc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
- typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u >::type temp_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "lock; xaddb %0, %1"
- : "+q" (v), "+m" (storage)
- :
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "xchgb %0, %1"
- : "+q" (v), "+m" (storage)
- :
- : "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- storage_type previous = expected;
- bool success;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgb %3, %1"
- : "+a" (previous), "+m" (storage), "=@ccz" (success)
- : "q" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgb %3, %1\n\t"
- "sete %2"
- : "+a" (previous), "+m" (storage), "=q" (success)
- : "q" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- expected = previous;
- return success;
- }
-
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
- temp_storage_type new_val;\
- __asm__ __volatile__\
- (\
- ".align 16\n\t"\
- "1: mov %[arg], %2\n\t"\
- op " %%al, %b2\n\t"\
- "lock; cmpxchgb %b2, %[storage]\n\t"\
- "jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
- : [arg] "ir" ((temp_storage_type)argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
- )
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
- return res;
- }
-
-#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
-};
-
-template< bool Signed >
-struct operations< 2u, Signed > :
- public gcc_x86_operations< 2u, Signed, operations< 2u, Signed > >
-{
- typedef gcc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
- typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u >::type temp_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "lock; xaddw %0, %1"
- : "+q" (v), "+m" (storage)
- :
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "xchgw %0, %1"
- : "+q" (v), "+m" (storage)
- :
- : "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- storage_type previous = expected;
- bool success;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgw %3, %1"
- : "+a" (previous), "+m" (storage), "=@ccz" (success)
- : "q" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgw %3, %1\n\t"
- "sete %2"
- : "+a" (previous), "+m" (storage), "=q" (success)
- : "q" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- expected = previous;
- return success;
- }
-
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
- temp_storage_type new_val;\
- __asm__ __volatile__\
- (\
- ".align 16\n\t"\
- "1: mov %[arg], %2\n\t"\
- op " %%ax, %w2\n\t"\
- "lock; cmpxchgw %w2, %[storage]\n\t"\
- "jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
- : [arg] "ir" ((temp_storage_type)argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
- )
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
- return res;
- }
-
-#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
-};
-
-template< bool Signed >
-struct operations< 4u, Signed > :
- public gcc_x86_operations< 4u, Signed, operations< 4u, Signed > >
-{
- typedef gcc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
- typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "lock; xaddl %0, %1"
- : "+r" (v), "+m" (storage)
- :
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "xchgl %0, %1"
- : "+r" (v), "+m" (storage)
- :
- : "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- storage_type previous = expected;
- bool success;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgl %3, %1"
- : "+a" (previous), "+m" (storage), "=@ccz" (success)
- : "r" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgl %3, %1\n\t"
- "sete %2"
- : "+a" (previous), "+m" (storage), "=q" (success)
- : "r" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- expected = previous;
- return success;
- }
-
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
- storage_type new_val;\
- __asm__ __volatile__\
- (\
- ".align 16\n\t"\
- "1: mov %[arg], %[new_val]\n\t"\
- op " %%eax, %[new_val]\n\t"\
- "lock; cmpxchgl %[new_val], %[storage]\n\t"\
- "jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
- : [arg] "ir" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
- )
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
- return res;
- }
-
-#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
-};
-
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
-
-template< bool Signed >
-struct operations< 8u, Signed > :
- public cas_based_operations< gcc_dcas_x86< Signed > >
-{
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-};
-
-#elif defined(__x86_64__)
-
-template< bool Signed >
-struct operations< 8u, Signed > :
- public gcc_x86_operations< 8u, Signed, operations< 8u, Signed > >
-{
- typedef gcc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
- typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-
- static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "lock; xaddq %0, %1"
- : "+r" (v), "+m" (storage)
- :
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- __asm__ __volatile__
- (
- "xchgq %0, %1"
- : "+r" (v), "+m" (storage)
- :
- : "memory"
- );
- return v;
- }
-
- static BOOST_FORCEINLINE bool compare_exchange_strong(
- storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
- {
- storage_type previous = expected;
- bool success;
-#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgq %3, %1"
- : "+a" (previous), "+m" (storage), "=@ccz" (success)
- : "r" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- __asm__ __volatile__
- (
- "lock; cmpxchgq %3, %1\n\t"
- "sete %2"
- : "+a" (previous), "+m" (storage), "=q" (success)
- : "r" (desired)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
- expected = previous;
- return success;
- }
-
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
- storage_type new_val;\
- __asm__ __volatile__\
- (\
- ".align 16\n\t"\
- "1: movq %[arg], %[new_val]\n\t"\
- op " %%rax, %[new_val]\n\t"\
- "lock; cmpxchgq %[new_val], %[storage]\n\t"\
- "jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
- : [arg] "r" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
- )
-
- static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
- return res;
- }
-
- static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
- {
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
- return res;
- }
-
-#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
-};
-
-#endif
-
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-
-template< bool Signed >
-struct operations< 16u, Signed > :
- public cas_based_operations< gcc_dcas_x86_64< Signed > >
-{
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
-};
-
-#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
-
-BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order == memory_order_seq_cst)
- {
- __asm__ __volatile__
- (
-#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
- "mfence\n"
-#else
- "lock; addl $0, (%%esp)\n"
-#endif
- ::: "memory"
- );
- }
- else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
- {
- __asm__ __volatile__ ("" ::: "memory");
- }
-}
-
-BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
-{
- if (order != memory_order_relaxed)
- __asm__ __volatile__ ("" ::: "memory");
-}
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
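The removed header implemented fetch_and/fetch_or/fetch_xor with a cmpxchg retry loop: load the current value into AL/AX/EAX, compute the new value in a scratch register, and retry "lock; cmpxchg" until no other thread changed the storage in between. A minimal sketch of the same pattern in portable C++11, using std::atomic::compare_exchange_weak instead of inline asm (the function name is illustrative, not part of Boost.Atomic):

#include <atomic>
#include <cstdint>

// Stand-in for the deleted asm CAS loop: fetch_and for a 32-bit value.
// compare_exchange_weak reloads "expected" on failure, exactly as cmpxchg
// reloads EAX, so the loop only recomputes the new value and retries.
std::uint32_t fetch_and_via_cas(std::atomic<std::uint32_t>& storage, std::uint32_t arg)
{
    std::uint32_t expected = storage.load(std::memory_order_relaxed);
    while (!storage.compare_exchange_weak(expected, expected & arg,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed))
    {
        // "expected" now holds the freshly observed value; retry.
    }
    return expected; // the previous value, as fetch_and requires
}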
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp
index 53628f3600..ef9c0abb72 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp
@@ -5,18 +5,19 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014, 2019 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_msvc_common.hpp
*
- * This header contains common tools for MSVC implementation of the \c operations template.
+ * This header contains common tools for the MSVC implementation of the atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -25,6 +26,8 @@
// Define compiler barriers
#if defined(__INTEL_COMPILER)
#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier()
+#elif defined(__clang__)
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __atomic_signal_fence(__ATOMIC_SEQ_CST)
#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
extern "C" void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
@@ -35,4 +38,6 @@ extern "C" void _ReadWriteBarrier(void);
#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER()
#endif
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
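BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() expands to a compiler-only fence: it forbids the compiler from reordering memory accesses across it but emits no hardware fence instruction. A small sketch of the idea using the standard equivalent, std::atomic_signal_fence, which is what the new clang branch maps to:

#include <atomic>

int g_data = 0;
std::atomic<bool> g_ready(false);

void publish()
{
    g_data = 42;
    // Compiler-only barrier: stops the compiler from sinking the g_data
    // store below this point. It does NOT order the stores for other CPUs;
    // that is why such barriers are only one building block of the full
    // atomic operations.
    std::atomic_signal_fence(std::memory_order_seq_cst);
    g_ready.store(true, std::memory_order_relaxed);
}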
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/pause.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/pause.hpp
index 37aa5ca84e..efdfb62b6a 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/pause.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/pause.hpp
@@ -4,23 +4,31 @@
* http://www.boost.org/LICENSE_1_0.txt)
*
* (C) Copyright 2013 Tim Blechmann
- * (C) Copyright 2013 Andrey Semashev
+ * (C) Copyright 2013, 2020 Andrey Semashev
*/
#ifndef BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
+#if defined(_MSC_VER)
+#if defined(_M_AMD64) || defined(_M_IX86)
extern "C" void _mm_pause(void);
#if defined(BOOST_MSVC)
#pragma intrinsic(_mm_pause)
#endif
+#elif defined(_M_ARM64) || defined(_M_ARM)
+extern "C" void __yield(void);
+#if defined(BOOST_MSVC)
+#pragma intrinsic(__yield)
+#endif
+#endif
#endif
namespace boost {
@@ -29,10 +37,18 @@ namespace detail {
BOOST_FORCEINLINE void pause() BOOST_NOEXCEPT
{
-#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
+#if defined(_MSC_VER)
+#if defined(_M_AMD64) || defined(_M_IX86)
_mm_pause();
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
- __asm__ __volatile__("pause;");
+#elif defined(_M_ARM64) || defined(_M_ARM)
+ __yield();
+#endif
+#elif defined(__GNUC__)
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__("pause;" : : : "memory");
+#elif (defined(__ARM_ARCH) && __ARM_ARCH >= 8) || defined(__ARM_ARCH_8A__) || defined(__aarch64__)
+ __asm__ __volatile__("yield;" : : : "memory");
+#endif
#endif
}
@@ -40,4 +56,6 @@ BOOST_FORCEINLINE void pause() BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
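pause() issues the architecture's spin-loop hint ("pause" on x86, "yield" on ARM), so a busy-wait burns less power and, on hyper-threaded x86 cores, cedes pipeline resources to the sibling thread. A spin-wait shaped the way wait implementations typically use it might look like this (a sketch, not library code):

#include <atomic>

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
inline void spin_pause() { __asm__ __volatile__("pause" ::: "memory"); }
#else
inline void spin_pause() {} // no-op fallback for other targets
#endif

void spin_until_set(std::atomic<int>& flag)
{
    while (flag.load(std::memory_order_acquire) == 0)
        spin_pause(); // hint to the CPU: this loop is just waiting
}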
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/platform.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/platform.hpp
index df4cc305ac..82609586da 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/platform.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/platform.hpp
@@ -4,7 +4,7 @@
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014-2018, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/platform.hpp
@@ -51,34 +51,44 @@
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_x86
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_x86
-#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
+#elif defined(__GNUC__) && defined(__aarch64__)
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
-#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_ppc
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_aarch64
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_aarch64
-#elif defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+#elif defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm
+#if (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 8)
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_aarch32
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_aarch32
+#else
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_arm
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_arm
+#endif
+
+#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
+
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_ppc
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_ppc
#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__sparcv8plus) || defined(__sparc_v9__))
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_sparc
#elif defined(__GNUC__) && defined(__alpha__)
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_alpha
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
-#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND msvc_x86
#elif defined(_MSC_VER) && _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64))
-#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND msvc_arm
#endif
@@ -90,21 +100,19 @@
((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
(defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
(\
- (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
+ (__GCC_ATOMIC_BOOL_LOCK_FREE == 2) ||\
+ (__GCC_ATOMIC_CHAR_LOCK_FREE == 2) ||\
+ (__GCC_ATOMIC_SHORT_LOCK_FREE == 2) ||\
+ (__GCC_ATOMIC_INT_LOCK_FREE == 2) ||\
+ (__GCC_ATOMIC_LONG_LOCK_FREE == 2) ||\
+ (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)\
)
-#define BOOST_ATOMIC_DETAIL_BACKEND gcc_atomic
-
-#elif defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+#define BOOST_ATOMIC_DETAIL_CORE_BACKEND gcc_atomic
-#define BOOST_ATOMIC_DETAIL_BACKEND BOOST_ATOMIC_DETAIL_PLATFORM
-
-#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
+// The GCC __sync* intrinsics backend is less efficient than asm-based backends, so use it only when nothing better is available.
+#elif !defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND) &&\
+ defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
(\
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) ||\
@@ -113,32 +121,59 @@
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\
)
-#define BOOST_ATOMIC_DETAIL_BACKEND gcc_sync
+#define BOOST_ATOMIC_DETAIL_CORE_BACKEND gcc_sync
#endif
// OS-based backends
-#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
+#if !defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND) && !defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND)
#if defined(__linux__) && defined(__arm__)
-#define BOOST_ATOMIC_DETAIL_BACKEND linux_arm
+#define BOOST_ATOMIC_DETAIL_CORE_BACKEND linux_arm
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
-#define BOOST_ATOMIC_DETAIL_BACKEND windows
+#define BOOST_ATOMIC_DETAIL_CORE_BACKEND windows
#endif
-#endif // !defined(BOOST_ATOMIC_DETAIL_BACKEND)
+#endif // !defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND)
+
+// Waiting and notifying operations backends
+#if defined(BOOST_WINDOWS)
+
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND windows
+
+#else // defined(BOOST_WINDOWS)
+
+#include <boost/atomic/detail/futex.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX)
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND futex
+#elif defined(__APPLE__)
+#if !defined(BOOST_ATOMIC_NO_DARWIN_ULOCK) && (\
+ (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101200) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 100000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 100000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 30000))
+// Darwin 16+ supports the ulock API
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND darwin_ulock
+#endif // __ENVIRONMENT_*_VERSION_MIN_REQUIRED__
+#elif defined(__FreeBSD__)
+#include <sys/param.h>
+// FreeBSD prior to 7.0 had _umtx_op with a different signature
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 700000
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND freebsd_umtx
+#endif // defined(__FreeBSD_version) && __FreeBSD_version >= 700000
+#elif defined(__DragonFly__)
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND dragonfly_umtx
+#endif
-#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
+#endif // defined(BOOST_WINDOWS)
-#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
-#define BOOST_ATOMIC_DETAIL_BACKEND emulated
-#define BOOST_ATOMIC_EMULATED
-#endif
+#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#if !defined(BOOST_ATOMIC_DETAIL_FP_BACKEND)
#define BOOST_ATOMIC_DETAIL_FP_BACKEND generic
@@ -155,9 +190,20 @@
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_GENERIC
#endif
-#define BOOST_ATOMIC_DETAIL_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_BACKEND).hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_WAIT_BACKEND)
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND generic
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND_GENERIC
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND)
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND).hpp>
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND)
+#define BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_CORE_BACKEND).hpp>
+#endif
#define BOOST_ATOMIC_DETAIL_FP_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_FP_BACKEND).hpp>
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_EXTRA_BACKEND).hpp>
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND).hpp>
+#define BOOST_ATOMIC_DETAIL_WAIT_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_WAIT_BACKEND).hpp>
#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
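The *_BACKEND_HEADER macros build an include path by token-pasting the given prefix with the selected backend name, so the rest of the library can write a single #include that resolves per platform. An illustrative expansion for a GCC/x86 build (the exact prefixes and file names are taken to follow the renamed 1.79 layout; treat them as an assumption):

// BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND is defined to gcc_x86, so
//
//   #include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/core_arch_ops_)
//
// expands via BOOST_JOIN to
//
//   #include <boost/atomic/detail/core_arch_ops_gcc_x86.hpp>
//
// The wait backend follows the same pattern; on Linux with futex support
// BOOST_ATOMIC_DETAIL_WAIT_BACKEND is futex, so the wait header resolves to
//
//   #include <boost/atomic/detail/wait_ops_futex.hpp>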
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_traits.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_traits.hpp
new file mode 100644
index 0000000000..1ba9d8fb09
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_traits.hpp
@@ -0,0 +1,187 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/storage_traits.hpp
+ *
+ * This header defines underlying types used as storage
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TRAITS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_STORAGE_TRAITS_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/string_ops.hpp>
+#include <boost/atomic/detail/aligned_variable.hpp>
+#include <boost/atomic/detail/type_traits/alignment_of.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T >
+BOOST_FORCEINLINE void non_atomic_load(T const volatile& from, T& to) BOOST_NOEXCEPT
+{
+ to = from;
+}
+
+template< std::size_t Size, std::size_t Alignment = 1u >
+struct BOOST_ATOMIC_DETAIL_MAY_ALIAS buffer_storage
+{
+ typedef unsigned char data_type[Size];
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(Alignment, data_type, data);
+
+ BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
+ {
+ return (data[0] == 0u && BOOST_ATOMIC_DETAIL_MEMCMP(data, data + 1, Size - 1u) == 0);
+ }
+
+ BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT
+ {
+ return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) == 0;
+ }
+
+ BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT
+ {
+ return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) != 0;
+ }
+};
+
+template< std::size_t Size, std::size_t Alignment >
+BOOST_FORCEINLINE void non_atomic_load(buffer_storage< Size, Alignment > const volatile& from, buffer_storage< Size, Alignment >& to) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_MEMCPY(to.data, const_cast< unsigned char const* >(from.data), Size);
+}
+
+template< std::size_t Size >
+struct storage_traits
+{
+ typedef buffer_storage< Size, 1u > type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = 1u;
+
+ // By default, prefer the maximum supported alignment
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 16u;
+};
+
+template< >
+struct storage_traits< 1u >
+{
+ typedef boost::uint8_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = 1u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 1u;
+};
+
+template< >
+struct storage_traits< 2u >
+{
+ typedef boost::uint16_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = atomics::detail::alignment_of< boost::uint16_t >::value;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 2u;
+};
+
+template< >
+struct storage_traits< 4u >
+{
+ typedef boost::uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = atomics::detail::alignment_of< boost::uint32_t >::value;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 4u;
+};
+
+template< >
+struct storage_traits< 8u >
+{
+ typedef boost::uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = atomics::detail::alignment_of< boost::uint64_t >::value;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 8u;
+};
+
+#if defined(BOOST_HAS_INT128)
+
+template< >
+struct storage_traits< 16u >
+{
+ typedef boost::uint128_type BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = atomics::detail::alignment_of< boost::uint128_type >::value;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 16u;
+};
+
+#else
+
+#if (__cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)) &&\
+ (!defined(BOOST_GCC_VERSION) || BOOST_GCC_VERSION >= 40900)
+using std::max_align_t;
+#else
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// alignment is sensitive to packing
+#pragma warning(disable: 4121)
+#endif
+
+class max_align_helper;
+union max_align_t
+{
+ void* ptr;
+ void (*fun_ptr)();
+ int max_align_helper::*mem_ptr;
+ void (max_align_helper::*mem_fun_ptr)();
+ long long ll;
+ long double ld;
+#if defined(BOOST_HAS_INT128)
+ boost::int128_type i128;
+#endif
+#if defined(BOOST_HAS_FLOAT128)
+ boost::float128_type f128;
+#endif
+};
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
+
+template< >
+struct storage_traits< 16u >
+{
+ typedef buffer_storage< 16u, atomics::detail::alignment_of< atomics::detail::max_align_t >::value > type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t native_alignment = atomics::detail::alignment_of< atomics::detail::max_align_t >::value;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t alignment = 16u;
+};
+
+#endif
+
+template< typename T >
+struct storage_size_of
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t size = sizeof(T);
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = (size == 3u ? 4u : (size >= 5u && size <= 7u ? 8u : (size >= 9u && size <= 15u ? 16u : size)));
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_STORAGE_TRAITS_HPP_INCLUDED_
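storage_size_of rounds an arbitrary object size up to the next size for which a native storage type exists (3 becomes 4, 5 through 7 become 8, 9 through 15 become 16), and storage_traits then supplies the matching uintN_t storage and its alignment. A sketch of the rounding rule in isolation (the helper name is hypothetical; the logic mirrors storage_size_of::value above):

#include <cstddef>

// Same rounding rule as storage_size_of::value.
constexpr std::size_t round_storage_size(std::size_t n)
{
    return n == 3u ? 4u
         : (n >= 5u && n <= 7u) ? 8u
         : (n >= 9u && n <= 15u) ? 16u
         : n;
}

struct rgb { unsigned char r, g, b; }; // sizeof == 3, no padding possible

static_assert(round_storage_size(sizeof(rgb)) == 4u,
              "a 3-byte payload is stored in a 4-byte (uint32_t) cell");
static_assert(round_storage_size(sizeof(double)) == 8u,
              "an 8-byte payload maps to uint64_t storage");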
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_type.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_type.hpp
deleted file mode 100644
index 5d824d3a27..0000000000
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/storage_type.hpp
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2009 Helge Bahmann
- * Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2013 - 2014 Andrey Semashev
- */
-/*!
- * \file atomic/detail/storage_type.hpp
- *
- * This header defines underlying types used as storage
- */
-
-#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
-
-#include <cstddef>
-#include <boost/cstdint.hpp>
-#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/string_ops.hpp>
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-template< typename T >
-BOOST_FORCEINLINE void non_atomic_load(T const volatile& from, T& to) BOOST_NOEXCEPT
-{
- to = from;
-}
-
-template< std::size_t Size >
-struct BOOST_ATOMIC_DETAIL_MAY_ALIAS buffer_storage
-{
- BOOST_ALIGNMENT(16) unsigned char data[Size];
-
- BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
- {
- return (data[0] == 0u && BOOST_ATOMIC_DETAIL_MEMCMP(data, data + 1, Size - 1) == 0);
- }
-
- BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT
- {
- return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) == 0;
- }
-
- BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT
- {
- return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) != 0;
- }
-};
-
-template< std::size_t Size >
-BOOST_FORCEINLINE void non_atomic_load(buffer_storage< Size > const volatile& from, buffer_storage< Size >& to) BOOST_NOEXCEPT
-{
- BOOST_ATOMIC_DETAIL_MEMCPY(to.data, const_cast< unsigned char const* >(from.data), Size);
-}
-
-template< std::size_t Size >
-struct make_storage_type
-{
- typedef buffer_storage< Size > type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 1u >
-{
- typedef boost::uint8_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 2u >
-{
- typedef boost::uint16_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(2) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 4u >
-{
- typedef boost::uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(4) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 8u >
-{
- typedef boost::uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(8) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-#if defined(BOOST_HAS_INT128)
-
-template< >
-struct make_storage_type< 16u >
-{
- typedef boost::uint128_type BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(16) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-#elif !defined(BOOST_NO_ALIGNMENT)
-
-struct BOOST_ATOMIC_DETAIL_MAY_ALIAS storage128_t
-{
- typedef boost::uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS element_type;
-
- element_type data[2];
-
- BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
- {
- return (data[0] | data[1]) == 0u;
- }
-};
-
-BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
-{
- return ((left.data[0] ^ right.data[0]) | (left.data[1] ^ right.data[1])) == 0u;
-}
-BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
-{
- return !(left == right);
-}
-
-BOOST_FORCEINLINE void non_atomic_load(storage128_t const volatile& from, storage128_t& to) BOOST_NOEXCEPT
-{
- to.data[0] = from.data[0];
- to.data[1] = from.data[1];
-}
-
-template< >
-struct make_storage_type< 16u >
-{
- typedef storage128_t type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(16) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-#endif
-
-template< typename T >
-struct storage_size_of
-{
- static BOOST_CONSTEXPR_OR_CONST std::size_t size = sizeof(T);
- static BOOST_CONSTEXPR_OR_CONST std::size_t value = (size == 3u ? 4u : (size >= 5u && size <= 7u ? 8u : (size >= 9u && size <= 15u ? 16u : size)));
-};
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
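The removed storage128_t compared its two 64-bit halves branch-free: XOR-ing a pair of halves yields zero only when they are equal, and OR-ing the two XOR results keeps any mismatch bit alive, so the whole 128-bit comparison reduces to a single test against zero. The same idiom in isolation:

#include <cstdint>

struct u128 { std::uint64_t lo, hi; };

// Branch-free equality: (a.lo ^ b.lo) is 0 only when the low halves match,
// likewise for the high halves; OR-ing the two preserves any difference.
inline bool equal128(u128 a, u128 b)
{
    return ((a.lo ^ b.lo) | (a.hi ^ b.hi)) == 0u;
}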
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/alignment_of.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/alignment_of.hpp
new file mode 100644
index 0000000000..bc5431fba6
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/alignment_of.hpp
@@ -0,0 +1,51 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/alignment_of.hpp
+ *
+ * This header defines \c alignment_of type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_ALIGNMENT_OF_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_ALIGNMENT_OF_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) ||\
+ (defined(BOOST_GCC) && BOOST_GCC < 80100) ||\
+ (defined(BOOST_CLANG) && !defined(__apple_build_version__) && __clang_major__ < 9) ||\
+ (defined(BOOST_CLANG) && defined(__apple_build_version__) && __clang_major__ < 10)
+// For some compilers std::alignment_of gives the wrong result for 64-bit types on 32-bit targets
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_STD_ALIGNMENT_OF
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_STD_ALIGNMENT_OF)
+#include <type_traits>
+#else
+#include <boost/type_traits/alignment_of.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_STD_ALIGNMENT_OF)
+using std::alignment_of;
+#else
+using boost::alignment_of;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_ALIGNMENT_OF_HPP_INCLUDED_
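The workaround matters because Boost.Atomic needs the alignment at which objects are actually placed, not the nominal one, to decide whether atomic_ref can treat a location as lock-free. On i386-style ABIs the two can diverge for 64-bit types, which is the discrepancy the fallback avoids. A small probe that makes the gap visible (the printed values assume a typical i386 System V target; they differ elsewhere):

#include <cstdint>
#include <cstdio>
#include <cstddef>

struct probe { char c; std::uint64_t v; };

int main()
{
    // On i386 GCC, alignof(std::uint64_t) reports 8, yet the ABI places a
    // uint64_t struct member on a 4-byte boundary, so offsetof prints 4.
    // A trait answering 8 here would mis-handle valid 4-aligned objects.
    std::printf("alignof = %zu, offsetof = %zu\n",
                alignof(std::uint64_t), offsetof(probe, v));
}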
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/has_unique_object_representations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/has_unique_object_representations.hpp
new file mode 100644
index 0000000000..8c72f15ea3
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/has_unique_object_representations.hpp
@@ -0,0 +1,143 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/has_unique_object_representations.hpp
+ *
+ * This header defines \c has_unique_object_representations type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+#include <type_traits>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if (defined(__cpp_lib_has_unique_object_representations) && __cpp_lib_has_unique_object_representations >= 201606) || \
+ (defined(_CPPLIB_VER) && _CPPLIB_VER >= 650 && defined(_HAS_CXX17) && _HAS_CXX17 != 0)
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+using std::has_unique_object_representations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#else // defined(__cpp_lib_has_unique_object_representations) ...
+
+#if (defined(__GNUC__) && __GNUC__ >= 7) || (defined(BOOST_MSVC) && BOOST_MSVC >= 1929) || \
+ (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1900)
+#define BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS(x) __has_unique_object_representations(x)
+#elif defined(__is_identifier)
+#if !__is_identifier(__has_unique_object_representations)
+#define BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS(x) __has_unique_object_representations(x)
+#endif
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
+
+#include <cstddef>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T >
+struct has_unique_object_representations :
+ public atomics::detail::integral_constant< bool, BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS(T) >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< T[] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T, std::size_t N >
+struct has_unique_object_representations< T[N] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< const T > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< volatile T > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< const volatile T > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< const T[] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< volatile T[] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T >
+struct has_unique_object_representations< const volatile T[] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T, std::size_t N >
+struct has_unique_object_representations< const T[N] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T, std::size_t N >
+struct has_unique_object_representations< volatile T[N] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+template< typename T, std::size_t N >
+struct has_unique_object_representations< const volatile T[N] > :
+ public atomics::detail::has_unique_object_representations< T >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#else // defined(BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
+
+#define BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS
+
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
+
+#endif // defined(__cpp_lib_has_unique_object_representations) ...
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATIONS_HPP_INCLUDED_
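The trait is true only for types in which every bit of the object participates in the value, with no padding and no alternative representations, which is exactly the precondition for comparing objects bytewise as compare_exchange must for non-integral payloads; the cv- and array-specializations above simply forward to the element type. A few representative answers (requires C++17 for the std trait; a sketch, not library text):

#include <cstdint>
#include <type_traits>

struct packed { std::int32_t a; std::int32_t b; }; // no padding bytes
struct padded { char c; std::int32_t n; };         // 3 padding bytes

static_assert(std::has_unique_object_representations<packed>::value, "");
static_assert(!std::has_unique_object_representations<padded>::value, "");
// float has distinct representations of equal values (e.g. +0.0 and -0.0):
static_assert(!std::has_unique_object_representations<float>::value, "");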
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_enum.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_enum.hpp
new file mode 100644
index 0000000000..16ff3be13e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_enum.hpp
@@ -0,0 +1,42 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_enum.hpp
+ *
+ * This header defines \c is_enum type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_ENUM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_ENUM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+#include <boost/type_traits/is_enum.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+using std::is_enum;
+#else
+using boost::is_enum;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_ENUM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_floating_point.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_floating_point.hpp
index c425112b8b..46e2ab85eb 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_floating_point.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_floating_point.hpp
@@ -15,7 +15,8 @@
#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_FLOATING_POINT_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+// Some versions of libstdc++ don't consider __float128 a floating point type, so use Boost.TypeTraits in that case.
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_FLOAT128)
#include <type_traits>
#else
#include <boost/type_traits/is_floating_point.hpp>
@@ -29,7 +30,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_FLOAT128)
using std::is_floating_point;
#else
using boost::is_floating_point;
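The change matters when BOOST_HAS_FLOAT128 is set: boost::atomic<__float128> must dispatch to the floating-point operations, and libstdc++'s std::is_floating_point<__float128> has historically been false (the type is a compiler extension). A sketch under the assumption that the extension and the corresponding Boost.TypeTraits specialization are available:

#include <boost/type_traits/is_floating_point.hpp>

#if defined(__SIZEOF_FLOAT128__) // GCC/Clang extension present
static_assert(boost::is_floating_point<__float128>::value,
              "Boost.TypeTraits recognizes the extension type");
// whereas std::is_floating_point<__float128>::value may be false on some
// libstdc++ versions: the false negative this header works around.
#endif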
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp
new file mode 100644
index 0000000000..bc8f6ee524
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp
@@ -0,0 +1,46 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_nothrow_default_constructible.hpp
+ *
+ * This header defines \c is_nothrow_default_constructible type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_NOTHROW_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_NOTHROW_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+#include <boost/type_traits/has_nothrow_constructor.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+using std::is_nothrow_default_constructible;
+#elif !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
+template< typename T >
+using is_nothrow_default_constructible = boost::has_nothrow_constructor< T >;
+#else
+template< typename T >
+struct is_nothrow_default_constructible : public boost::has_nothrow_constructor< T > {};
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_NOTHROW_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
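The three branches pick the best available spelling of the same trait: the standard one, a C++11 alias template over Boost's pre-C++11 trait, or a derived struct when even alias templates are missing; all three resolve identically. An illustrative use (the test types are hypothetical):

#include <boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp>

struct quiet  { quiet() noexcept {} };
struct throwy { throwy() { /* potentially throwing */ } };

static_assert(boost::atomics::detail::is_nothrow_default_constructible<quiet>::value, "");
static_assert(!boost::atomics::detail::is_nothrow_default_constructible<throwy>::value, "");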
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_trivially_copyable.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_trivially_copyable.hpp
new file mode 100644
index 0000000000..732321202c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/type_traits/is_trivially_copyable.hpp
@@ -0,0 +1,45 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_trivially_copyable.hpp
+ *
+ * This header defines \c is_trivially_copyable type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_COPYABLE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_COPYABLE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+// For std::is_trivially_copyable we require genuine support from the compiler.
+// Falling back to is_pod, or to a false negative result in Boost.TypeTraits, is not acceptable,
+// as this trait will be used in a static assert and may deny valid uses of boost::atomic/atomic_ref.
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+using std::is_trivially_copyable;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_COPYABLE_HPP_INCLUDED_
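The "no fallback" decision is deliberate: the trait gates a static_assert in boost::atomic and boost::atomic_ref, so a trait that wrongly answered false would reject valid programs, while simply leaving it undefined (the BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE path) lets the library skip the check on pre-C++11 compilers. The gated check looks essentially like this (a sketch with made-up names, not the exact library text):

#include <type_traits>

template< typename T >
class my_atomic
{
#if !defined(MY_NO_IS_TRIVIALLY_COPYABLE) // stand-in for the Boost macro
    static_assert(std::is_trivially_copyable<T>::value,
                  "atomic<T> requires a trivially copyable T");
#endif
    T m_value;
};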
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_capabilities.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_capabilities.hpp
new file mode 100644
index 0000000000..d9137b0d2f
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_capabilities.hpp
@@ -0,0 +1,363 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_capabilities.hpp
+ *
+ * This header defines waiting/notifying operations capabilities macros.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPABILITIES_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPABILITIES_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+#include <boost/atomic/detail/float_sizes.hpp>
+#endif
+
+#if !defined(BOOST_ATOMIC_EMULATED) && !defined(BOOST_ATOMIC_DETAIL_WAIT_BACKEND_GENERIC)
+#include BOOST_ATOMIC_DETAIL_WAIT_BACKEND_HEADER(boost/atomic/detail/wait_caps_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY 0
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY 0
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY 0
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY 0
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY 0
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY 0
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY 0
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY 0
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT128_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT128_WAIT_NOTIFY 0
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT128_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_INT128_IPC_WAIT_NOTIFY 0
+#endif
+
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR8_T_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR8_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR8_T_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR8_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR16_T_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR16_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR16_T_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR16_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR32_T_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR32_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_CHAR32_T_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_CHAR32_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_WCHAR_T_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_SHORT_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
+#define BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
+#define BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_INT_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LLONG_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_POINTER_WAIT_NOTIFY
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_WAIT_NOTIFY 0
+#endif
+#endif
+
+#define BOOST_ATOMIC_HAS_NATIVE_ADDRESS_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_POINTER_WAIT_NOTIFY
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_POINTER_IPC_WAIT_NOTIFY
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_POINTER_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#define BOOST_ATOMIC_HAS_NATIVE_ADDRESS_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_POINTER_IPC_WAIT_NOTIFY
+
+// We store bools in 1-byte storage in all backends
+#ifndef BOOST_ATOMIC_HAS_NATIVE_BOOL_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_BOOL_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_BOOL_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_BOOL_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT8_IPC_WAIT_NOTIFY
+#endif
+
+#ifndef BOOST_ATOMIC_HAS_NATIVE_FLAG_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_FLAG_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#endif
+#ifndef BOOST_ATOMIC_HAS_NATIVE_FLAG_IPC_WAIT_NOTIFY
+#define BOOST_ATOMIC_HAS_NATIVE_FLAG_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#endif
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_WAIT_NOTIFY 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_FLOAT_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_WAIT_NOTIFY 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_DOUBLE_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_WAIT_NOTIFY 0
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT16_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE <= 16
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY BOOST_ATOMIC_HAS_NATIVE_INT128_IPC_WAIT_NOTIFY
+#else
+#define BOOST_ATOMIC_HAS_NATIVE_LONG_DOUBLE_IPC_WAIT_NOTIFY 0
+#endif
+#endif
+
+#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPABILITIES_HPP_INCLUDED_
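
The per-type macros above follow the same 0/1/2 convention as the lock-free macros: 2 means native wait/notify is always available, 1 means availability is detected at run time, 0 means the lock-based emulation is used. A minimal consumer-side sketch, assuming <boost/atomic.hpp> exposes these macros and the has_native_wait_notify() member as documented for Boost.Atomic 1.78+:

#include <iostream>
#include <boost/atomic.hpp>

int main()
{
    boost::atomic< int > x(0);
    // 2 = always native, 1 = detected at run time, 0 = lock-based emulation
    std::cout << "int wait/notify capability: " << BOOST_ATOMIC_HAS_NATIVE_INT_WAIT_NOTIFY << std::endl;
    // Per-object query; mainly interesting when the macro value is 1
    std::cout << "native for this object: " << x.has_native_wait_notify() << std::endl;
    return 0;
}
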
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_darwin_ulock.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_darwin_ulock.hpp
new file mode 100644
index 0000000000..45c0ccba2f
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_darwin_ulock.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_caps_darwin_ulock.hpp
+ *
+ * This header defines capability macros for the waiting/notifying operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPS_DARWIN_ULOCK_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPS_DARWIN_ULOCK_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+
+// Darwin 19+ (Mac OS 10.15+, iOS 13.0+, tvOS 13.0+, watchOS 6.0+) adds support for 64-bit
+// and inter-process ulock operations.
+// https://shift.click/blog/futex-like-apis/#darwin-macos-ios-tvos-watchos-and-more
+// https://github.com/thomcc/ulock-sys/blob/2597e63cc5372459a903c292a3919d385a3e3789/src/lib.rs
+#if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 130000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 60000)
+#define BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64
+#define BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED
+#endif
+
+// Darwin 20+ (Mac OS 11.0+, iOS 14.0+, tvOS 14.0+, watchOS 7.0+) introduces __ulock_wait2, which accepts
+// the timeout in nanoseconds. __ulock_wait is a wrapper on top of __ulock_wait2.
+#if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 110000) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 140000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 140000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 70000)
+#define BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_WAIT2
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY BOOST_ATOMIC_INT64_LOCK_FREE
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT64_LOCK_FREE
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPS_DARWIN_ULOCK_HPP_INCLUDED_
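
The two feature tiers above are driven entirely by the deployment target the translation unit is built for. A purely illustrative compile-time sketch of the resulting configuration (including a detail header directly is not public API):

// With e.g. -mmacosx-version-min=11.0 the first branch is taken; with 10.15, the second.
#include <boost/atomic/detail/wait_caps_darwin_ulock.hpp>

#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_WAIT2)
// Darwin 20+: __ulock_wait2 with nanosecond timeouts, plus everything below
#elif defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
// Darwin 19+: 64-bit and inter-process (shared) compare-and-wait
#else
// Older Darwin: only 32-bit intra-process compare-and-wait
#endif
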
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_dragonfly_umtx.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_dragonfly_umtx.hpp
new file mode 100644
index 0000000000..7e9b54fb5d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_dragonfly_umtx.hpp
@@ -0,0 +1,30 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_caps_dragonfly_umtx.hpp
+ *
+ * This header defines capability macros for the waiting/notifying operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPS_DRAGONFLY_UMTX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPS_DRAGONFLY_UMTX_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// DragonFly BSD umtx_sleep/umtx_wakeup use the physical address of the atomic object as a key, which means they should support address-free operations.
+// https://man.dragonflybsd.org/?command=umtx&section=2
+
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPS_DRAGONFLY_UMTX_HPP_INCLUDED_
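
A hedged sketch of the protocol this capability advertises (DragonFly only; signatures per the umtx(2) man page linked above): umtx_sleep blocks only while the pointed-to int still holds the expected value, and umtx_wakeup's count argument selects how many sleepers to wake, with 0 meaning all.

#include <unistd.h>

// Block until *addr stops being equal to `expected`; a 0 timeout means wait forever.
void umtx_wait_sketch(volatile const int* addr, int expected)
{
    while (*addr == expected)
        ::umtx_sleep(addr, expected, 0);  // may return spuriously; the loop re-checks
}

void umtx_wake_all_sketch(volatile const int* addr)
{
    ::umtx_wakeup(addr, 0);  // count 0 wakes every sleeper on this address
}
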
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_freebsd_umtx.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_freebsd_umtx.hpp
new file mode 100644
index 0000000000..5f4359c09e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_freebsd_umtx.hpp
@@ -0,0 +1,40 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_caps_freebsd_umtx.hpp
+ *
+ * This header defines capability macros for the waiting/notifying operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPS_FREEBSD_UMTX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPS_FREEBSD_UMTX_HPP_INCLUDED_
+
+#include <sys/umtx.h>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// FreeBSD _umtx_op uses the physical address of the atomic object as a key, which means it should support address-free operations.
+// https://www.freebsd.org/cgi/man.cgi?query=_umtx_op&apropos=0&sektion=2&manpath=FreeBSD+11-current&format=html
+
+#if (defined(UMTX_OP_WAIT_UINT) && BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4) ||\
+ (defined(UMTX_OP_WAIT) && BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4)
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#endif
+
+#if defined(UMTX_OP_WAIT) && BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY BOOST_ATOMIC_INT64_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT64_LOCK_FREE
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPS_FREEBSD_UMTX_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_futex.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_futex.hpp
new file mode 100644
index 0000000000..e61d62bf18
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_futex.hpp
@@ -0,0 +1,31 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_caps_futex.hpp
+ *
+ * This header defines capability macros for the waiting/notifying operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPS_FUTEX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPS_FUTEX_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+#include <boost/atomic/detail/futex.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX)
+// Futexes are always 32-bit, and they have always supported address-free operations
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_IPC_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX)
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPS_FUTEX_HPP_INCLUDED_
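
For reference, a minimal sketch of the raw protocol this backend wraps (Linux only; boost/atomic/detail/futex.hpp itself is not shown in this diff). Dropping the _PRIVATE suffix gives the address-free, inter-process form that the IPC capability macro advertises:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <cstdint>

// Sleep while *addr == expected; the kernel re-checks the value atomically before sleeping.
void futex_wait_sketch(std::uint32_t* addr, std::uint32_t expected)
{
    ::syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
}

void futex_wake_one_sketch(std::uint32_t* addr)
{
    ::syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
}
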
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_windows.hpp
new file mode 100644
index 0000000000..a3c065a972
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_caps_windows.hpp
@@ -0,0 +1,57 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_caps_windows.hpp
+ *
+ * This header defines capability macros for the waiting/notifying operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_CAPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_CAPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/winapi/config.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// MSDN says WaitOnAddress, WakeByAddressSingle and WakeByAddressAll only support notifications between threads of the same process, so address-free (inter-process) operations are not available.
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitonaddress
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-wakebyaddresssingle
+// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-wakebyaddressall
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN8 && (BOOST_WINAPI_PARTITION_APP || BOOST_WINAPI_PARTITION_SYSTEM)
+
+#define BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS
+
+#define BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY BOOST_ATOMIC_INT8_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY BOOST_ATOMIC_INT16_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY BOOST_ATOMIC_INT64_LOCK_FREE
+
+#else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN8 && (BOOST_WINAPI_PARTITION_APP || BOOST_WINAPI_PARTITION_SYSTEM)
+
+// Since availability of WaitOnAddress etc. is detected at run time, we define the capability macros to 1 (run-time detected) instead of 2 (always available)
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+#define BOOST_ATOMIC_HAS_NATIVE_INT8_WAIT_NOTIFY 1
+#endif
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+#define BOOST_ATOMIC_HAS_NATIVE_INT16_WAIT_NOTIFY 1
+#endif
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+#define BOOST_ATOMIC_HAS_NATIVE_INT32_WAIT_NOTIFY 1
+#endif
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+#define BOOST_ATOMIC_HAS_NATIVE_INT64_WAIT_NOTIFY 1
+#endif
+
+#endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN8 && (BOOST_WINAPI_PARTITION_APP || BOOST_WINAPI_PARTITION_SYSTEM)
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_CAPS_WINDOWS_HPP_INCLUDED_
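
When targeting Windows 8+, the statically-linked APIs gated above can be used directly; a minimal illustration (not part of the diff), linked against synchronization.lib:

#include <windows.h>
// Link with synchronization.lib (WaitOnAddress lives in the synchronization API set).

void wait_for_change(volatile LONG* addr, LONG undesired)
{
    // WaitOnAddress returns when *addr != undesired, on wake, or spuriously;
    // the loop re-checks, mirroring the wait loops in the ops headers further below.
    while (*addr == undesired)
        ::WaitOnAddress(addr, &undesired, sizeof(LONG), INFINITE);
}

void publish_and_wake(volatile LONG* addr, LONG value)
{
    ::InterlockedExchange(addr, value);
    ::WakeByAddressSingle(const_cast< LONG* >(addr));
}
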
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_on_address.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_on_address.hpp
new file mode 100644
index 0000000000..a09734f3ba
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_on_address.hpp
@@ -0,0 +1,65 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_on_address.hpp
+ *
+ * This header contains declarations for run-time detection of \c WaitOnAddress and related APIs on Windows.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_ON_ADDRESS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_ON_ADDRESS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/winapi/basic_types.hpp>
+#include <boost/atomic/detail/link.hpp>
+#include <boost/atomic/detail/once_flag.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+typedef boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+wait_on_address_t(
+ volatile boost::winapi::VOID_* addr,
+ boost::winapi::PVOID_ compare_addr,
+ boost::winapi::SIZE_T_ size,
+ boost::winapi::DWORD_ timeout_ms);
+
+typedef boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+wake_by_address_t(boost::winapi::PVOID_ addr);
+
+extern BOOST_ATOMIC_DECL wait_on_address_t* wait_on_address;
+extern BOOST_ATOMIC_DECL wake_by_address_t* wake_by_address_single;
+extern BOOST_ATOMIC_DECL wake_by_address_t* wake_by_address_all;
+
+extern BOOST_ATOMIC_DECL once_flag wait_functions_once_flag;
+BOOST_ATOMIC_DECL void initialize_wait_functions() BOOST_NOEXCEPT;
+
+BOOST_FORCEINLINE void ensure_wait_functions_initialized() BOOST_NOEXCEPT
+{
+ BOOST_STATIC_ASSERT_MSG(once_flag_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for bytes");
+ if (BOOST_LIKELY(once_flag_operations::load(wait_functions_once_flag.m_flag, boost::memory_order_acquire) == 0u))
+ return;
+
+ initialize_wait_functions();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_ON_ADDRESS_HPP_INCLUDED_
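
The definitions of these function pointers live in the compiled library sources, which this diff does not include. A hypothetical sketch of what the resolution amounts to; the DLL name and the lookup details are assumptions, not the verified implementation:

#include <windows.h>

typedef BOOL (WINAPI* wait_on_address_fn)(volatile VOID*, PVOID, SIZE_T, DWORD);
static wait_on_address_fn g_wait_on_address = nullptr;

void initialize_wait_functions_sketch()
{
    // On Win8+ WaitOnAddress is exported from the api-ms-win-core-synch-l1-2-0
    // API set (backed by KernelBase.dll); on older systems the lookup fails and
    // the pointer stays null, which is what has_native_wait_notify() then reports.
    HMODULE mod = ::GetModuleHandleW(L"api-ms-win-core-synch-l1-2-0.dll");
    if (mod)
        g_wait_on_address = reinterpret_cast< wait_on_address_fn >(
            ::GetProcAddress(mod, "WaitOnAddress"));
}
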
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations.hpp
new file mode 100644
index 0000000000..d9d1b6ba2c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations.hpp
@@ -0,0 +1,28 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_operations.hpp
+ *
+ * This header defines waiting/notifying atomic operations, including the generic version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/wait_ops_generic.hpp>
+#include <boost/atomic/detail/wait_ops_emulated.hpp>
+
+#if !defined(BOOST_ATOMIC_DETAIL_WAIT_BACKEND_GENERIC)
+#include BOOST_ATOMIC_DETAIL_WAIT_BACKEND_HEADER(boost/atomic/detail/wait_ops_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations_fwd.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations_fwd.hpp
new file mode 100644
index 0000000000..1870af767c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_operations_fwd.hpp
@@ -0,0 +1,43 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_operations_fwd.hpp
+ *
+ * This header contains the forward declaration of the \c wait_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template<
+ typename Base,
+ std::size_t Size = sizeof(typename Base::storage_type),
+ bool AlwaysLockFree = Base::is_always_lock_free,
+ bool Interprocess = Base::is_interprocess
+>
+struct wait_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPERATIONS_FWD_HPP_INCLUDED_
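
The defaulted non-type parameters are what route each atomic to a backend: the platform headers specialize on a concrete Size with AlwaysLockFree = true, and everything else falls through to the generic or emulated versions. A toy analogue of the dispatch, with illustrative names that are not Boost's:

#include <cstddef>

template<
    typename Base,
    std::size_t Size = sizeof(typename Base::storage_type),
    bool AlwaysLockFree = Base::is_always_lock_free
>
struct pick;  // primary template: declared only, like wait_operations above

template< typename Base >
struct pick< Base, 4u, true > { static const char* name() { return "native 32-bit"; } };

template< typename Base, std::size_t Size >
struct pick< Base, Size, false > { static const char* name() { return "lock-based"; } };

struct demo_base
{
    typedef unsigned int storage_type;              // 4 bytes on common ABIs
    static const bool is_always_lock_free = true;
};

// pick< demo_base > resolves to pick< demo_base, 4u, true >, the "native" branch.
int main() { return pick< demo_base >::name() != nullptr ? 0 : 1; }
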
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_darwin_ulock.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_darwin_ulock.hpp
new file mode 100644
index 0000000000..ae37880e5e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_darwin_ulock.hpp
@@ -0,0 +1,158 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_darwin_ulock.hpp
+ *
+ * This header contains an implementation of the waiting/notifying atomic operations for Darwin systems, based on the ulock system calls.
+ *
+ * https://github.com/apple/darwin-xnu/blob/master/bsd/sys/ulock.h
+ * https://github.com/apple/darwin-xnu/blob/master/bsd/kern/sys_ulock.c
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_DARWIN_ULOCK_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_DARWIN_ULOCK_HPP_INCLUDED_
+
+#include <stdint.h>
+#include <cerrno>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/wait_capabilities.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+extern "C" {
+// Timeout is in microseconds with zero meaning no timeout
+int __ulock_wait(uint32_t operation, void* addr, uint64_t value, uint32_t timeout);
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_WAIT2)
+// Timeout is in nanoseconds with zero meaning no timeout
+int __ulock_wait2(uint32_t operation, void* addr, uint64_t value, uint64_t timeout, uint64_t value2);
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_WAIT2)
+int __ulock_wake(uint32_t operation, void* addr, uint64_t wake_value);
+} // extern "C"
+
+enum ulock_op
+{
+ ulock_op_compare_and_wait = 1,
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+ ulock_op_compare_and_wait_shared = 3,
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+ ulock_op_compare_and_wait64 = 5,
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+ ulock_op_compare_and_wait64_shared = 6,
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+
+ // Flags for __ulock_wake
+ ulock_flag_wake_all = 0x00000100,
+
+ // Generic flags
+ ulock_flag_no_errno = 0x01000000
+};
+
+template< typename Base, uint32_t Opcode >
+struct wait_operations_darwin_ulock_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_WAIT2)
+ __ulock_wait2(Opcode | ulock_flag_no_errno, const_cast< storage_type* >(&storage), old_val, 0u, 0u);
+#else
+ __ulock_wait(Opcode | ulock_flag_no_errno, const_cast< storage_type* >(&storage), old_val, 0u);
+#endif
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ while (true)
+ {
+ const int res = __ulock_wake(Opcode | ulock_flag_no_errno, const_cast< storage_type* >(&storage), 0u);
+ if (BOOST_LIKELY(res != -EINTR))
+ break;
+ }
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ while (true)
+ {
+ const int res = __ulock_wake(Opcode | ulock_flag_wake_all | ulock_flag_no_errno, const_cast< storage_type* >(&storage), 0u);
+ if (BOOST_LIKELY(res != -EINTR))
+ break;
+ }
+ }
+};
+
+template< typename Base >
+struct wait_operations< Base, sizeof(uint32_t), true, false > :
+ public wait_operations_darwin_ulock_common< Base, ulock_op_compare_and_wait >
+{
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+
+template< typename Base >
+struct wait_operations< Base, sizeof(uint32_t), true, true > :
+ public wait_operations_darwin_ulock_common< Base, ulock_op_compare_and_wait_shared >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+
+template< typename Base >
+struct wait_operations< Base, sizeof(uint64_t), true, false > :
+ public wait_operations_darwin_ulock_common< Base, ulock_op_compare_and_wait64 >
+{
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+
+template< typename Base >
+struct wait_operations< Base, sizeof(uint64_t), true, true > :
+ public wait_operations_darwin_ulock_common< Base, ulock_op_compare_and_wait64_shared >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK_SHARED)
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_DARWIN_ULOCK64)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_DARWIN_ULOCK_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_dragonfly_umtx.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_dragonfly_umtx.hpp
new file mode 100644
index 0000000000..466adbdc64
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_dragonfly_umtx.hpp
@@ -0,0 +1,75 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_dragonfly_umtx.hpp
+ *
+ * This header contains an implementation of the waiting/notifying atomic operations based on DragonFly BSD umtx.
+ * https://man.dragonflybsd.org/?command=umtx&section=2
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_DRAGONFLY_UMTX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_DRAGONFLY_UMTX_HPP_INCLUDED_
+
+#include <unistd.h>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, bool Interprocess >
+struct wait_operations< Base, sizeof(int), true, Interprocess > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ ::umtx_sleep(reinterpret_cast< int* >(const_cast< storage_type* >(&storage)), static_cast< int >(old_val), 0);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ::umtx_wakeup(reinterpret_cast< int* >(const_cast< storage_type* >(&storage)), 1);
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ::umtx_wakeup(reinterpret_cast< int* >(const_cast< storage_type* >(&storage)), 0);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_DRAGONFLY_UMTX_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_emulated.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_emulated.hpp
new file mode 100644
index 0000000000..75c0354817
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_emulated.hpp
@@ -0,0 +1,97 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_emulated.hpp
+ *
+ * This header contains an emulated (lock-based) implementation of the waiting and notifying atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_EMULATED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Emulated implementation of waiting and notifying operations
+template< typename Base >
+struct wait_operations_emulated :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef lock_pool::scoped_lock< base_type::storage_alignment, true > scoped_lock;
+ typedef lock_pool::scoped_wait_state< base_type::storage_alignment > scoped_wait_state;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = false;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return false;
+ }
+
+ static
+#if defined(BOOST_MSVC) && BOOST_MSVC < 1500
+ // In some cases, when this function is inlined, MSVC-8 (VS2005) x64 generates broken code that returns a bogus value.
+ BOOST_NOINLINE
+#endif
+ storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ storage_type const& s = const_cast< storage_type const& >(storage);
+ scoped_wait_state wait_state(&storage);
+ storage_type new_val = s;
+ while (new_val == old_val)
+ {
+ wait_state.wait();
+ new_val = s;
+ }
+
+ return new_val;
+ }
+
+ static void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ scoped_lock lock(&storage);
+ lock_pool::notify_one(lock.get_lock_state(), &storage);
+ }
+
+ static void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
+ scoped_lock lock(&storage);
+ lock_pool::notify_all(lock.get_lock_state(), &storage);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Interprocess >
+struct wait_operations< Base, Size, false, Interprocess > :
+ public wait_operations_emulated< Base >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_EMULATED_HPP_INCLUDED_
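
lock_pool itself is declared in lock_pool.hpp, which this diff does not show. Conceptually it is a fixed table of mutex/condition-variable pairs indexed by a hash of the object's address; a hypothetical reduction of the idea (the real pool also backs the lock-based atomic operations themselves):

#include <mutex>
#include <condition_variable>
#include <cstdint>

struct pool_entry { std::mutex m; std::condition_variable cv; };
static pool_entry g_pool[64];

static pool_entry& entry_for(const volatile void* addr)
{
    // Unrelated objects may hash to the same entry; that only costs spurious wakeups.
    return g_pool[(reinterpret_cast< std::uintptr_t >(addr) >> 4u) & 63u];
}

template< typename T >
T emulated_wait_sketch(T const volatile& storage, T old_val)
{
    pool_entry& e = entry_for(&storage);
    std::unique_lock< std::mutex > lock(e.m);
    // In the real backend stores also happen under this lock, so the re-check races nothing.
    while (const_cast< T const& >(storage) == old_val)
        e.cv.wait(lock);
    return const_cast< T const& >(storage);
}

template< typename T >
void emulated_notify_all_sketch(T const volatile& storage)
{
    pool_entry& e = entry_for(&storage);
    std::lock_guard< std::mutex > lock(e.m);
    e.cv.notify_all();
}
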
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_freebsd_umtx.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_freebsd_umtx.hpp
new file mode 100644
index 0000000000..28ef47ef61
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_freebsd_umtx.hpp
@@ -0,0 +1,119 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_freebsd_umtx.hpp
+ *
+ * This header contains an implementation of the waiting/notifying atomic operations based on the FreeBSD _umtx_op system call.
+ * https://www.freebsd.org/cgi/man.cgi?query=_umtx_op&apropos=0&sektion=2&manpath=FreeBSD+11-current&format=html
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_
+
+#include <sys/types.h>
+#include <sys/umtx.h>
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(UMTX_OP_WAIT_UINT) || defined(UMTX_OP_WAIT)
+
+template< typename Base >
+struct wait_operations_freebsd_umtx_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ::_umtx_op(const_cast< storage_type* >(&storage), UMTX_OP_WAKE, 1u, NULL, NULL);
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ::_umtx_op(const_cast< storage_type* >(&storage), UMTX_OP_WAKE, (~static_cast< unsigned int >(0u)) >> 1, NULL, NULL);
+ }
+};
+
+#endif // defined(UMTX_OP_WAIT_UINT) || defined(UMTX_OP_WAIT)
+
+// UMTX_OP_WAIT_UINT first appeared in FreeBSD 8.0
+#if defined(UMTX_OP_WAIT_UINT) && BOOST_ATOMIC_DETAIL_SIZEOF_INT < BOOST_ATOMIC_DETAIL_SIZEOF_LONG
+
+template< typename Base, bool Interprocess >
+struct wait_operations< Base, sizeof(unsigned int), true, Interprocess > :
+ public wait_operations_freebsd_umtx_common< Base >
+{
+ typedef wait_operations_freebsd_umtx_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ ::_umtx_op(const_cast< storage_type* >(&storage), UMTX_OP_WAIT_UINT, old_val, NULL, NULL);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+};
+
+#endif // defined(UMTX_OP_WAIT_UINT) && BOOST_ATOMIC_DETAIL_SIZEOF_INT < BOOST_ATOMIC_DETAIL_SIZEOF_LONG
+
+#if defined(UMTX_OP_WAIT)
+
+template< typename Base, bool Interprocess >
+struct wait_operations< Base, sizeof(unsigned long), true, Interprocess > :
+ public wait_operations_freebsd_umtx_common< Base >
+{
+ typedef wait_operations_freebsd_umtx_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ ::_umtx_op(const_cast< storage_type* >(&storage), UMTX_OP_WAIT, old_val, NULL, NULL);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+};
+
+#endif // defined(UMTX_OP_WAIT)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_FREEBSD_UMTX_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_futex.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_futex.hpp
new file mode 100644
index 0000000000..ec308a2d6b
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_futex.hpp
@@ -0,0 +1,111 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_futex.hpp
+ *
+ * This header contains an implementation of the waiting/notifying atomic operations based on futexes.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_FUTEX_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_FUTEX_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/futex.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct wait_operations< Base, 4u, true, false > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ atomics::detail::futex_wait_private(const_cast< storage_type* >(&storage), old_val);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ atomics::detail::futex_signal_private(const_cast< storage_type* >(&storage));
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ atomics::detail::futex_broadcast_private(const_cast< storage_type* >(&storage));
+ }
+};
+
+template< typename Base >
+struct wait_operations< Base, 4u, true, true > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ atomics::detail::futex_wait(const_cast< storage_type* >(&storage), old_val);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ atomics::detail::futex_signal(const_cast< storage_type* >(&storage));
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ atomics::detail::futex_broadcast(const_cast< storage_type* >(&storage));
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_FUTEX_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_generic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_generic.hpp
new file mode 100644
index 0000000000..8462a44a7e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_generic.hpp
@@ -0,0 +1,143 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_generic.hpp
+ *
+ * This header contains a generic (lock-based) implementation of the waiting/notifying atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_GENERIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_GENERIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/pause.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Generic implementation of waiting/notifying operations
+template< typename Base, bool Interprocess >
+struct wait_operations_generic;
+
+template< typename Base >
+struct wait_operations_generic< Base, false > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef lock_pool::scoped_lock< base_type::storage_alignment, true > scoped_lock;
+ typedef lock_pool::scoped_wait_state< base_type::storage_alignment > scoped_wait_state;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = false;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return false;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ if (new_val == old_val)
+ {
+ scoped_wait_state wait_state(&storage);
+ new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ wait_state.wait();
+ new_val = base_type::load(storage, order);
+ }
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ scoped_lock lock(&storage);
+ lock_pool::notify_one(lock.get_lock_state(), &storage);
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ scoped_lock lock(&storage);
+ lock_pool::notify_all(lock.get_lock_state(), &storage);
+ }
+};
+
+template< typename Base >
+struct wait_operations_generic< Base, true > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = false;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return false;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ if (new_val == old_val)
+ {
+ for (unsigned int i = 0u; i < 16u; ++i)
+ {
+ atomics::detail::pause();
+ new_val = base_type::load(storage, order);
+ if (new_val != old_val)
+ goto finish;
+ }
+
+ do
+ {
+ atomics::detail::wait_some();
+ new_val = base_type::load(storage, order);
+ }
+ while (new_val == old_val);
+ }
+
+ finish:
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile&) BOOST_NOEXCEPT
+ {
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile&) BOOST_NOEXCEPT
+ {
+ }
+};
+
+template< typename Base, std::size_t Size, bool Interprocess >
+struct wait_operations< Base, Size, true, Interprocess > :
+ public wait_operations_generic< Base, Interprocess >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_GENERIC_HPP_INCLUDED_
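
The inter-process variant above cannot block on a kernel object (none is address-free here), so it degrades to polling: a short busy-wait with pause(), then progressively cheaper sleeping via wait_some(). A portable approximation of the same shape, with std::this_thread standing in for those internal helpers:

#include <atomic>
#include <chrono>
#include <thread>

int poll_wait_sketch(const std::atomic< int >& a, int old_val)
{
    int v = a.load(std::memory_order_acquire);
    for (unsigned int i = 0u; i < 16u && v == old_val; ++i)
    {
        std::this_thread::yield();                 // stands in for the CPU pause hint
        v = a.load(std::memory_order_acquire);
    }
    while (v == old_val)                           // then sleep between polls
    {
        std::this_thread::sleep_for(std::chrono::microseconds(100));
        v = a.load(std::memory_order_acquire);
    }
    return v;
}
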
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_windows.hpp
new file mode 100644
index 0000000000..5e77b53257
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/wait_ops_windows.hpp
@@ -0,0 +1,179 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/wait_ops_windows.hpp
+ *
+ * This header contains an implementation of the waiting/notifying atomic operations on Windows.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_WAIT_OPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_WAIT_OPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/winapi/wait_constants.hpp>
+#include <boost/atomic/detail/wait_operations_fwd.hpp>
+#include <boost/atomic/detail/wait_capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+#include <boost/winapi/wait_on_address.hpp>
+#if (defined(BOOST_ATOMIC_FORCE_AUTO_LINK) || (!defined(BOOST_ALL_NO_LIB) && !defined(BOOST_ATOMIC_NO_LIB))) && !defined(BOOST_ATOMIC_NO_SYNCHRONIZATION_LIB)
+#define BOOST_LIB_NAME "synchronization"
+#if defined(BOOST_AUTO_LINK_NOMANGLE)
+#include <boost/config/auto_link.hpp>
+#else // defined(BOOST_AUTO_LINK_NOMANGLE)
+#define BOOST_AUTO_LINK_NOMANGLE
+#include <boost/config/auto_link.hpp>
+#undef BOOST_AUTO_LINK_NOMANGLE
+#endif // defined(BOOST_AUTO_LINK_NOMANGLE)
+#endif // (defined(BOOST_ATOMIC_FORCE_AUTO_LINK) || (!defined(BOOST_ALL_NO_LIB) && !defined(BOOST_ATOMIC_NO_LIB))) && !defined(BOOST_ATOMIC_NO_SYNCHRONIZATION_LIB)
+#else // defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+#include <cstddef>
+#include <boost/atomic/detail/wait_on_address.hpp>
+#include <boost/atomic/detail/wait_ops_generic.hpp>
+#endif // defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+
+template< typename Base, std::size_t Size >
+struct wait_operations_windows :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = true;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ return true;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ boost::winapi::WaitOnAddress(const_cast< storage_type* >(&storage), &old_val, Size, boost::winapi::infinite);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ boost::winapi::WakeByAddressSingle(const_cast< storage_type* >(&storage));
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ boost::winapi::WakeByAddressAll(const_cast< storage_type* >(&storage));
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+
+template< typename Base, std::size_t Size >
+struct wait_operations_windows :
+ public atomics::detail::wait_operations_generic< Base, false >
+{
+ typedef atomics::detail::wait_operations_generic< Base, false > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = false;
+
+ static BOOST_FORCEINLINE bool has_native_wait_notify(storage_type const volatile&) BOOST_NOEXCEPT
+ {
+ ensure_wait_functions_initialized();
+ return atomics::detail::wait_on_address != NULL;
+ }
+
+ static BOOST_FORCEINLINE storage_type wait(storage_type const volatile& storage, storage_type old_val, memory_order order) BOOST_NOEXCEPT
+ {
+ ensure_wait_functions_initialized();
+
+ if (BOOST_LIKELY(atomics::detail::wait_on_address != NULL))
+ {
+ storage_type new_val = base_type::load(storage, order);
+ while (new_val == old_val)
+ {
+ atomics::detail::wait_on_address(const_cast< storage_type* >(&storage), &old_val, Size, boost::winapi::infinite);
+ new_val = base_type::load(storage, order);
+ }
+
+ return new_val;
+ }
+ else
+ {
+ return base_type::wait(storage, old_val, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE void notify_one(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ensure_wait_functions_initialized();
+
+ if (BOOST_LIKELY(atomics::detail::wake_by_address_single != NULL))
+ atomics::detail::wake_by_address_single(const_cast< storage_type* >(&storage));
+ else
+ base_type::notify_one(storage);
+ }
+
+ static BOOST_FORCEINLINE void notify_all(storage_type volatile& storage) BOOST_NOEXCEPT
+ {
+ ensure_wait_functions_initialized();
+
+ if (BOOST_LIKELY(atomics::detail::wake_by_address_all != NULL))
+ atomics::detail::wake_by_address_all(const_cast< storage_type* >(&storage));
+ else
+ base_type::notify_all(storage);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_WINDOWS_HAS_WAIT_ON_ADDRESS)
+
+template< typename Base >
+struct wait_operations< Base, 1u, true, false > :
+ public wait_operations_windows< Base, 1u >
+{
+};
+
+template< typename Base >
+struct wait_operations< Base, 2u, true, false > :
+ public wait_operations_windows< Base, 2u >
+{
+};
+
+template< typename Base >
+struct wait_operations< Base, 4u, true, false > :
+ public wait_operations_windows< Base, 4u >
+{
+};
+
+template< typename Base >
+struct wait_operations< Base, 8u, true, false > :
+ public wait_operations_windows< Base, 8u >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_WAIT_OPS_WINDOWS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/fences.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/fences.hpp
index 31e3040578..00b83600bb 100644
--- a/contrib/restricted/boost/atomic/include/boost/atomic/fences.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/fences.hpp
@@ -18,7 +18,8 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
-#include <boost/atomic/detail/operations.hpp>
+#include <boost/atomic/detail/fence_operations.hpp>
+#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -26,36 +27,22 @@
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
- * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ * see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
*/
namespace boost {
namespace atomics {
-#if BOOST_ATOMIC_THREAD_FENCE > 0
BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) BOOST_NOEXCEPT
{
- detail::thread_fence(order);
+ atomics::detail::fence_operations::thread_fence(order);
}
-#else
-BOOST_FORCEINLINE void atomic_thread_fence(memory_order) BOOST_NOEXCEPT
-{
- detail::lockpool::thread_fence();
-}
-#endif
-#if BOOST_ATOMIC_SIGNAL_FENCE > 0
BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) BOOST_NOEXCEPT
{
- detail::signal_fence(order);
+ atomics::detail::fence_operations::signal_fence(order);
}
-#else
-BOOST_FORCEINLINE void atomic_signal_fence(memory_order) BOOST_NOEXCEPT
-{
- detail::lockpool::signal_fence();
-}
-#endif
} // namespace atomics
@@ -64,4 +51,6 @@ using atomics::atomic_signal_fence;
} // namespace boost
+#include <boost/atomic/detail/footer.hpp>
+
#endif // BOOST_ATOMIC_FENCES_HPP_INCLUDED_
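// [Editorial sketch, not part of the patch] After this change both fences
// delegate unconditionally to fence_operations, dropping the old lock-pool
// fallback branch. Typical use of the public API, pairing a release fence
// with an acquire fence around relaxed accesses:

#include <boost/atomic.hpp>

boost::atomic<int> payload(0), ready(0);

void writer()
{
    payload.store(42, boost::memory_order_relaxed);
    boost::atomic_thread_fence(boost::memory_order_release);
    ready.store(1, boost::memory_order_relaxed);
}

void reader()
{
    while (ready.load(boost::memory_order_relaxed) == 0)
    {
    }
    boost::atomic_thread_fence(boost::memory_order_acquire);
    int v = payload.load(boost::memory_order_relaxed); // guaranteed to see 42
    (void)v;
}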
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic.hpp
new file mode 100644
index 0000000000..d5e4bfbbf8
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic.hpp
@@ -0,0 +1,91 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/ipc_atomic.hpp
+ *
+ * This header contains the definition of the \c ipc_atomic template.
+ */
+
+#ifndef BOOST_ATOMIC_IPC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_IPC_ATOMIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/classify.hpp>
+#include <boost/atomic/detail/atomic_impl.hpp>
+#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
+#include <boost/atomic/detail/type_traits/is_nothrow_default_constructible.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+
+//! Atomic object for inter-process communication
+template< typename T >
+class ipc_atomic :
+ public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type, true >
+{
+private:
+ typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type, true > base_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+ BOOST_STATIC_ASSERT_MSG(sizeof(value_type) > 0u, "boost::ipc_atomic<T> requires T to be a complete type");
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+ BOOST_STATIC_ASSERT_MSG(atomics::detail::is_trivially_copyable< value_type >::value, "boost::ipc_atomic<T> requires T to be a trivially copyable type");
+#endif
+
+public:
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT ipc_atomic() BOOST_NOEXCEPT_IF(atomics::detail::is_nothrow_default_constructible< value_type >::value) : base_type()
+ {
+ }
+
+ BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT ipc_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v)
+ {
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE operator value_type() const volatile BOOST_NOEXCEPT
+ {
+ return this->load();
+ }
+
+ BOOST_DELETED_FUNCTION(ipc_atomic(ipc_atomic const&))
+ BOOST_DELETED_FUNCTION(ipc_atomic& operator= (ipc_atomic const&))
+ BOOST_DELETED_FUNCTION(ipc_atomic& operator= (ipc_atomic const&) volatile)
+};
+
+} // namespace atomics
+
+using atomics::ipc_atomic;
+
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_IPC_ATOMIC_HPP_INCLUDED_
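// [Editorial sketch, not part of the patch] ipc_atomic mirrors the
// boost::atomic interface but is intended for objects placed in memory shared
// between processes; for it, is_lock_free() implies the operations are also
// address-free. A minimal use over an anonymous shared mapping (the mmap
// setup is purely illustrative; error handling omitted):

#include <boost/atomic/ipc_atomic.hpp>
#include <sys/mman.h>
#include <new>

int main()
{
    void* shm = ::mmap(NULL, sizeof(boost::ipc_atomic< unsigned int >),
                       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);

    // Placement-construct in shared memory; after fork(), parent and child
    // can both operate on *counter.
    boost::ipc_atomic< unsigned int >* counter = new (shm) boost::ipc_atomic< unsigned int >(0u);

    if (counter->is_lock_free())
        counter->fetch_add(1u, boost::memory_order_relaxed);

    return 0;
}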
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_flag.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_flag.hpp
new file mode 100644
index 0000000000..b90a1f9c63
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_flag.hpp
@@ -0,0 +1,40 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/ipc_atomic_flag.hpp
+ *
+ * This header contains the definition of \c ipc_atomic_flag.
+ */
+
+#ifndef BOOST_ATOMIC_IPC_ATOMIC_FLAG_HPP_INCLUDED_
+#define BOOST_ATOMIC_IPC_ATOMIC_FLAG_HPP_INCLUDED_
+
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/atomic_flag_impl.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+
+//! Atomic flag for inter-process communication
+typedef atomics::detail::atomic_flag_impl< true > ipc_atomic_flag;
+
+} // namespace atomics
+
+using atomics::ipc_atomic_flag;
+
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_IPC_ATOMIC_FLAG_HPP_INCLUDED_
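// [Editorial sketch, not part of the patch] ipc_atomic_flag is the
// inter-process counterpart of boost::atomic_flag; the canonical use is a
// process-shared spinlock (the flag itself must live in shared memory):

#include <boost/atomic/ipc_atomic_flag.hpp>

void lock(boost::ipc_atomic_flag& flag)
{
    while (flag.test_and_set(boost::memory_order_acquire))
    {
        // spin until the current owner calls clear()
    }
}

void unlock(boost::ipc_atomic_flag& flag)
{
    flag.clear(boost::memory_order_release);
}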
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_ref.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_ref.hpp
new file mode 100644
index 0000000000..eddb0cb09d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/ipc_atomic_ref.hpp
@@ -0,0 +1,98 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020-2021 Andrey Semashev
+ */
+/*!
+ * \file atomic/ipc_atomic_ref.hpp
+ *
+ * This header contains the definition of the \c ipc_atomic_ref template.
+ */
+
+#ifndef BOOST_ATOMIC_IPC_ATOMIC_REF_HPP_INCLUDED_
+#define BOOST_ATOMIC_IPC_ATOMIC_REF_HPP_INCLUDED_
+
+#include <boost/assert.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/classify.hpp>
+#include <boost/atomic/detail/atomic_ref_impl.hpp>
+#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+
+//! Atomic reference to external object for inter-process communication
+template< typename T >
+class ipc_atomic_ref :
+ public atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, true >
+{
+private:
+ typedef atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, true > base_type;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+
+ BOOST_STATIC_ASSERT_MSG(sizeof(value_type) > 0u, "boost::ipc_atomic_ref<T> requires T to be a complete type");
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
+ BOOST_STATIC_ASSERT_MSG(atomics::detail::is_trivially_copyable< value_type >::value, "boost::ipc_atomic_ref<T> requires T to be a trivially copyable type");
+#endif
+
+private:
+ typedef typename base_type::storage_type storage_type;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(ipc_atomic_ref(ipc_atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
+ BOOST_FORCEINLINE explicit ipc_atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
+ {
+ // Check that the referenced object's alignment satisfies the required alignment
+ BOOST_ASSERT((((atomics::detail::uintptr_t)this->m_value) & (base_type::required_alignment - 1u)) == 0u);
+ }
+
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) const BOOST_NOEXCEPT
+ {
+ this->store(v);
+ return v;
+ }
+
+ BOOST_FORCEINLINE operator value_type() const BOOST_NOEXCEPT
+ {
+ return this->load();
+ }
+
+ BOOST_DELETED_FUNCTION(ipc_atomic_ref& operator= (ipc_atomic_ref const&))
+};
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX17_DEDUCTION_GUIDES)
+template< typename T >
+ipc_atomic_ref(T&) -> ipc_atomic_ref< T >;
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX17_DEDUCTION_GUIDES)
+
+//! IPC atomic reference factory function
+template< typename T >
+BOOST_FORCEINLINE ipc_atomic_ref< T > make_ipc_atomic_ref(T& value) BOOST_NOEXCEPT
+{
+ return ipc_atomic_ref< T >(value);
+}
+
+} // namespace atomics
+
+using atomics::ipc_atomic_ref;
+using atomics::make_ipc_atomic_ref;
+
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_IPC_ATOMIC_REF_HPP_INCLUDED_
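// [Editorial sketch, not part of the patch] ipc_atomic_ref applies atomic
// operations to a pre-existing object in shared memory. The referenced object
// must satisfy required_alignment (checked by the assertion in the
// constructor above) and must not be accessed non-atomically while any
// reference to it exists:

#include <boost/atomic/ipc_atomic_ref.hpp>

struct shared_block // assumed to be placed in shared memory by other means
{
    unsigned int counter;
};

unsigned int bump(shared_block& blk)
{
    // make_ipc_atomic_ref deduces ipc_atomic_ref< unsigned int >
    return boost::make_ipc_atomic_ref(blk.counter).fetch_add(1u, boost::memory_order_relaxed);
}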
diff --git a/contrib/restricted/boost/atomic/include/boost/memory_order.hpp b/contrib/restricted/boost/atomic/include/boost/memory_order.hpp
index 1f7d202731..ba7d1cdd98 100644
--- a/contrib/restricted/boost/atomic/include/boost/memory_order.hpp
+++ b/contrib/restricted/boost/atomic/include/boost/memory_order.hpp
@@ -54,18 +54,12 @@ enum class memory_order : unsigned int
seq_cst = 14 // acq_rel | 8
};
-#if !defined(BOOST_NO_CXX17_INLINE_VARIABLES)
-#define BOOST_MEMORY_ORDER_INLINE_VARIABLE inline
-#else
-#define BOOST_MEMORY_ORDER_INLINE_VARIABLE
-#endif
-
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_relaxed = memory_order::relaxed;
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_consume = memory_order::consume;
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acquire = memory_order::acquire;
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_release = memory_order::release;
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acq_rel = memory_order::acq_rel;
-BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_seq_cst = memory_order::seq_cst;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_relaxed = memory_order::relaxed;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_consume = memory_order::consume;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acquire = memory_order::acquire;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_release = memory_order::release;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acq_rel = memory_order::acq_rel;
+BOOST_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_seq_cst = memory_order::seq_cst;
#undef BOOST_MEMORY_ORDER_INLINE_VARIABLE
diff --git a/contrib/restricted/boost/atomic/src/bit_operation_tools.hpp b/contrib/restricted/boost/atomic/src/bit_operation_tools.hpp
new file mode 100644
index 0000000000..2c114b92aa
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/bit_operation_tools.hpp
@@ -0,0 +1,82 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file bit_operation_tools.hpp
+ *
+ * This file contains bit operation tools.
+ */
+
+#ifndef BOOST_ATOMIC_BIT_OPERATION_TOOLS_HPP_INCLUDED_
+#define BOOST_ATOMIC_BIT_OPERATION_TOOLS_HPP_INCLUDED_
+
+#include <boost/predef/architecture/x86.h>
+
+#if BOOST_ARCH_X86
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#if defined(_MSC_VER)
+extern "C" unsigned char _BitScanForward(unsigned long* index, unsigned long x);
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_BitScanForward)
+#endif
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Counts trailing zero bits
+BOOST_FORCEINLINE unsigned int count_trailing_zeros(unsigned int x)
+{
+#if defined(__GNUC__)
+ return __builtin_ctz(x);
+#elif defined(_MSC_VER)
+ unsigned long index;
+ _BitScanForward(&index, x);
+ return static_cast< unsigned int >(index);
+#else
+ unsigned int index = 0u;
+ if ((x & 0xFFFF) == 0u)
+ {
+ x >>= 16;
+ index += 16u;
+ }
+ if ((x & 0xFF) == 0u)
+ {
+ x >>= 8;
+ index += 8u;
+ }
+ if ((x & 0xF) == 0u)
+ {
+ x >>= 4;
+ index += 4u;
+ }
+ if ((x & 0x3) == 0u)
+ {
+ x >>= 2;
+ index += 2u;
+ }
+ if ((x & 0x1) == 0u)
+ {
+ index += 1u;
+ }
+ return index;
+#endif
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ARCH_X86
+
+#endif // BOOST_ATOMIC_BIT_OPERATION_TOOLS_HPP_INCLUDED_
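// [Editorial sketch, not part of the patch] A quick trace of the portable
// fallback above for x = 0x50 (binary 0101'0000): only the (x & 0xF) test
// fires, shifting by 4 and accumulating index == 4, which matches
// __builtin_ctz(0x50). As with __builtin_ctz, the result for x == 0 is
// unspecified in every branch.

#include <cassert>

int main()
{
    unsigned int x = 0x50u, index = 0u;
    if ((x & 0xFFFF) == 0u) { x >>= 16; index += 16u; }
    if ((x & 0xFF) == 0u)   { x >>= 8;  index += 8u;  }
    if ((x & 0xF) == 0u)    { x >>= 4;  index += 4u;  }
    if ((x & 0x3) == 0u)    { x >>= 2;  index += 2u;  }
    if ((x & 0x1) == 0u)    { index += 1u; }
    assert(index == 4u);
    return 0;
}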
diff --git a/contrib/restricted/boost/atomic/src/cpuid.hpp b/contrib/restricted/boost/atomic/src/cpuid.hpp
new file mode 100644
index 0000000000..452917a33c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/cpuid.hpp
@@ -0,0 +1,86 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file cpuid.hpp
+ *
+ * This file contains the declaration of the \c cpuid function.
+ */
+
+#ifndef BOOST_ATOMIC_CPUID_HPP_INCLUDED_
+#define BOOST_ATOMIC_CPUID_HPP_INCLUDED_
+
+#include <boost/predef/architecture/x86.h>
+
+#if BOOST_ARCH_X86
+
+#if defined(_MSC_VER)
+#include <intrin.h> // __cpuid
+#endif
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#include <boost/atomic/detail/header.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! The function invokes the x86 cpuid instruction
+inline void cpuid(uint32_t& eax, uint32_t& ebx, uint32_t& ecx, uint32_t& edx)
+{
+#if defined(__GNUC__)
+#if (defined(__i386__) || defined(__VXWORKS__)) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC >= 50100))
+ // Unless the compiler can do it automatically, we have to back up ebx in 32-bit PIC/PIE code because it is reserved by the ABI.
+ // For VxWorks, ebx is reserved on 64-bit as well.
+#if defined(__x86_64__)
+ uint64_t rbx = ebx;
+ __asm__ __volatile__
+ (
+ "xchgq %%rbx, %0\n\t"
+ "cpuid\n\t"
+ "xchgq %%rbx, %0\n\t"
+ : "+DS" (rbx), "+a" (eax), "+c" (ecx), "+d" (edx)
+ );
+ ebx = static_cast< uint32_t >(rbx);
+#else // defined(__x86_64__)
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %0\n\t"
+ "cpuid\n\t"
+ "xchgl %%ebx, %0\n\t"
+ : "+DS" (ebx), "+a" (eax), "+c" (ecx), "+d" (edx)
+ );
+#endif // defined(__x86_64__)
+#else
+ __asm__ __volatile__
+ (
+ "cpuid\n\t"
+ : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx)
+ );
+#endif
+#elif defined(_MSC_VER)
+ int regs[4] = {};
+ __cpuid(regs, eax);
+ eax = regs[0];
+ ebx = regs[1];
+ ecx = regs[2];
+ edx = regs[3];
+#else
+#error "Boost.Atomic: Unsupported compiler, cpuid instruction cannot be generated"
+#endif
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ARCH_X86
+
+#endif // BOOST_ATOMIC_CPUID_HPP_INCLUDED_
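// [Editorial sketch, not part of the patch] How lock_pool.cpp (further below)
// consumes this helper: leaf 0 reports the maximum supported leaf in eax, and
// leaf 1 reports feature bits (SSE2 is edx bit 26, SSE4.1 is ecx bit 19):

#include <boost/cstdint.hpp>
#include "cpuid.hpp" // the header above

bool cpu_has_sse41()
{
    boost::uint32_t eax = 0u, ebx = 0u, ecx = 0u, edx = 0u;
    boost::atomics::detail::cpuid(eax, ebx, ecx, edx); // leaf 0
    if (eax < 1u)
        return false;

    eax = 1u;
    ebx = ecx = edx = 0u;
    boost::atomics::detail::cpuid(eax, ebx, ecx, edx); // leaf 1
    return (ecx & (1u << 19)) != 0u;
}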
diff --git a/contrib/restricted/boost/atomic/src/find_address.hpp b/contrib/restricted/boost/atomic/src/find_address.hpp
new file mode 100644
index 0000000000..c841c68e22
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/find_address.hpp
@@ -0,0 +1,45 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file find_address.hpp
+ *
+ * This file contains the declaration of the \c find_address algorithm.
+ */
+
+#ifndef BOOST_ATOMIC_FIND_ADDRESS_HPP_INCLUDED_
+#define BOOST_ATOMIC_FIND_ADDRESS_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/predef/architecture/x86.h>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! \c find_address signature
+typedef std::size_t (find_address_t)(const volatile void* addr, const volatile void* const* addrs, std::size_t size);
+
+extern find_address_t find_address_generic;
+
+#if BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+extern find_address_t find_address_sse2;
+#if BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+extern find_address_t find_address_sse41;
+#endif // BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+#endif // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_FIND_ADDRESS_HPP_INCLUDED_
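// [Editorial sketch, not part of the patch] The contract shared by the
// generic and SIMD implementations declared above: return the index of addr
// within addrs, or a value >= size on a miss (the generic version returns
// exactly size). The SIMD variants additionally expect addrs to be 16-byte
// aligned and padded with null spare entries, which wait_state_list in
// lock_pool.cpp arranges.

#include <cstddef>
#include "find_address.hpp" // the header above

std::size_t locate(const volatile void* needle,
                   const volatile void* const* addrs, std::size_t size)
{
    std::size_t pos = boost::atomics::detail::find_address_generic(needle, addrs, size);
    return pos; // pos < size on a hit, pos == size on a miss
}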
diff --git a/contrib/restricted/boost/atomic/src/find_address_sse2.cpp b/contrib/restricted/boost/atomic/src/find_address_sse2.cpp
new file mode 100644
index 0000000000..0739b33ca0
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/find_address_sse2.cpp
@@ -0,0 +1,284 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file find_address_sse2.cpp
+ *
+ * This file contains the SSE2 implementation of the \c find_address algorithm.
+ */
+
+#include <boost/predef/architecture/x86.h>
+#include <boost/atomic/detail/int_sizes.hpp>
+
+#if BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+
+#include <cstddef>
+#include <emmintrin.h>
+
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include "find_address.hpp"
+#include "x86_vector_tools.hpp"
+#include "bit_operation_tools.hpp"
+
+#include <boost/atomic/detail/header.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+namespace {
+
+BOOST_FORCEINLINE __m128i mm_pand_si128(__m128i mm1, __m128i mm2)
+{
+ // As of 2020, gcc, clang and icc prefer to generate andps instead of pand if the surrounding
+ // instructions pertain to FP domain, even if we use the _mm_and_si128 intrinsic. In our
+ // algorithm implementation, the FP instruction happens to be shufps, which is not actually
+ // restricted to the FP domain (it is actually implemented in a separate MMX EU in Pentium 4 or
+ // a shuffle EU in INT domain in Core 2; on AMD K8/K10 all SSE instructions are implemented in
+ // FADD, FMUL and FMISC EUs regardless of INT/FP data types, and shufps is implemented in FADD/FMUL).
+ // In other words, there should be no domain bypass penalty between shufps and pand.
+ //
+ // This would usually not pose a problem since andps and pand have the same latency and throughput
+ // on most architectures of that age (before SSE4.1). However, it is possible that a newer architecture
+ // runs the SSE2 code path (e.g. because some weird compiler doesn't support SSE4.1 or because
+ // a hypervisor blocks SSE4.1 detection), and there pand may have a better throughput. For example,
+ // Sandy Bridge can execute 3 pand instructions per cycle, but only one andps. For this reason
+ // we prefer to generate pand and not andps.
+#if defined(__GNUC__)
+ __asm__("pand %1, %0\n\t" : "+x" (mm1) : "x" (mm2));
+#else
+ mm1 = _mm_and_si128(mm1, mm2);
+#endif
+ return mm1;
+}
+
+} // namespace
+#endif // BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+
+//! SSE2 implementation of the \c find_address algorithm
+std::size_t find_address_sse2(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+#if BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+
+ if (size < 12u)
+ return find_address_generic(addr, addrs, size);
+
+ const __m128i mm_addr = mm_set1_epiptr((uintptr_t)addr);
+ std::size_t pos = 0u;
+ const std::size_t n = (size + 1u) & ~static_cast< std::size_t >(1u);
+ for (std::size_t m = n & ~static_cast< std::size_t >(15u); pos < m; pos += 16u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+ __m128i mm3 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+ __m128i mm4 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 6u));
+ __m128i mm5 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 8u));
+ __m128i mm6 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 10u));
+ __m128i mm7 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 12u));
+ __m128i mm8 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 14u));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi32(mm2, mm_addr);
+ mm3 = _mm_cmpeq_epi32(mm3, mm_addr);
+ mm4 = _mm_cmpeq_epi32(mm4, mm_addr);
+ mm5 = _mm_cmpeq_epi32(mm5, mm_addr);
+ mm6 = _mm_cmpeq_epi32(mm6, mm_addr);
+ mm7 = _mm_cmpeq_epi32(mm7, mm_addr);
+ mm8 = _mm_cmpeq_epi32(mm8, mm_addr);
+
+ __m128i mm_mask1_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask1_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128i mm_mask2_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask2_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128i mm_mask3_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm5), _mm_castsi128_ps(mm6), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask3_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm5), _mm_castsi128_ps(mm6), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128i mm_mask4_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm7), _mm_castsi128_ps(mm8), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask4_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm7), _mm_castsi128_ps(mm8), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ mm_mask1_lo = mm_pand_si128(mm_mask1_lo, mm_mask1_hi);
+ mm_mask2_lo = mm_pand_si128(mm_mask2_lo, mm_mask2_hi);
+ mm_mask3_lo = mm_pand_si128(mm_mask3_lo, mm_mask3_hi);
+ mm_mask4_lo = mm_pand_si128(mm_mask4_lo, mm_mask4_hi);
+
+ mm_mask1_lo = _mm_packs_epi32(mm_mask1_lo, mm_mask2_lo);
+ mm_mask3_lo = _mm_packs_epi32(mm_mask3_lo, mm_mask4_lo);
+
+ mm_mask1_lo = _mm_packs_epi16(mm_mask1_lo, mm_mask3_lo);
+
+ uint32_t mask = _mm_movemask_epi8(mm_mask1_lo);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+ }
+
+ if ((n - pos) >= 8u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+ __m128i mm3 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+ __m128i mm4 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 6u));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi32(mm2, mm_addr);
+ mm3 = _mm_cmpeq_epi32(mm3, mm_addr);
+ mm4 = _mm_cmpeq_epi32(mm4, mm_addr);
+
+ __m128i mm_mask1_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask1_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ __m128i mm_mask2_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask2_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ mm_mask1_lo = mm_pand_si128(mm_mask1_lo, mm_mask1_hi);
+ mm_mask2_lo = mm_pand_si128(mm_mask2_lo, mm_mask2_hi);
+
+ mm_mask1_lo = _mm_packs_epi32(mm_mask1_lo, mm_mask2_lo);
+
+ uint32_t mask = _mm_movemask_epi8(mm_mask1_lo);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask) / 2u;
+ goto done;
+ }
+
+ pos += 8u;
+ }
+
+ if ((n - pos) >= 4u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi32(mm2, mm_addr);
+
+ __m128i mm_mask1_lo = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i mm_mask1_hi = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(3, 1, 3, 1)));
+
+ mm_mask1_lo = mm_pand_si128(mm_mask1_lo, mm_mask1_hi);
+
+ uint32_t mask = _mm_movemask_ps(_mm_castsi128_ps(mm_mask1_lo));
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+
+ pos += 4u;
+ }
+
+ if (pos < n)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ __m128i mm_mask = _mm_shuffle_epi32(mm1, _MM_SHUFFLE(2, 3, 0, 1));
+ mm_mask = mm_pand_si128(mm_mask, mm1);
+
+ uint32_t mask = _mm_movemask_pd(_mm_castsi128_pd(mm_mask));
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+
+ pos += 2u;
+ }
+
+done:
+ return pos;
+
+#else // BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+
+ if (size < 10u)
+ return find_address_generic(addr, addrs, size);
+
+ const __m128i mm_addr = _mm_set1_epi32((uintptr_t)addr);
+ std::size_t pos = 0u;
+ const std::size_t n = (size + 3u) & ~static_cast< std::size_t >(3u);
+ for (std::size_t m = n & ~static_cast< std::size_t >(15u); pos < m; pos += 16u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+ __m128i mm3 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 8u));
+ __m128i mm4 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 12u));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi32(mm2, mm_addr);
+ mm3 = _mm_cmpeq_epi32(mm3, mm_addr);
+ mm4 = _mm_cmpeq_epi32(mm4, mm_addr);
+
+ mm1 = _mm_packs_epi32(mm1, mm2);
+ mm3 = _mm_packs_epi32(mm3, mm4);
+
+ mm1 = _mm_packs_epi16(mm1, mm3);
+
+ uint32_t mask = _mm_movemask_epi8(mm1);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+ }
+
+ if ((n - pos) >= 8u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi32(mm2, mm_addr);
+
+ mm1 = _mm_packs_epi32(mm1, mm2);
+
+ uint32_t mask = _mm_movemask_epi8(mm1);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask) / 2u;
+ goto done;
+ }
+
+ pos += 8u;
+ }
+
+ if (pos < n)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+
+ mm1 = _mm_cmpeq_epi32(mm1, mm_addr);
+
+ uint32_t mask = _mm_movemask_ps(_mm_castsi128_ps(mm1));
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+
+ pos += 4u;
+ }
+
+done:
+ return pos;
+
+#endif // BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
diff --git a/contrib/restricted/boost/atomic/src/find_address_sse41.cpp b/contrib/restricted/boost/atomic/src/find_address_sse41.cpp
new file mode 100644
index 0000000000..1db1b74335
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/find_address_sse41.cpp
@@ -0,0 +1,154 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file find_address_sse41.cpp
+ *
+ * This file contains the SSE4.1 implementation of the \c find_address algorithm.
+ */
+
+#include <boost/predef/architecture/x86.h>
+#include <boost/atomic/detail/int_sizes.hpp>
+
+#if BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8)
+
+#include <cstddef>
+#include <smmintrin.h>
+
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include "find_address.hpp"
+#include "x86_vector_tools.hpp"
+#include "bit_operation_tools.hpp"
+
+#include <boost/atomic/detail/header.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! SSE4.1 implementation of the \c find_address algorithm
+std::size_t find_address_sse41(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+ if (size < 12u)
+ return find_address_generic(addr, addrs, size);
+
+ const __m128i mm_addr = mm_set1_epiptr((uintptr_t)addr);
+ std::size_t pos = 0u;
+ const std::size_t n = (size + 1u) & ~static_cast< std::size_t >(1u);
+ for (std::size_t m = n & ~static_cast< std::size_t >(15u); pos < m; pos += 16u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+ __m128i mm3 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+ __m128i mm4 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 6u));
+ __m128i mm5 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 8u));
+ __m128i mm6 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 10u));
+ __m128i mm7 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 12u));
+ __m128i mm8 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 14u));
+
+ mm1 = _mm_cmpeq_epi64(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi64(mm2, mm_addr);
+ mm3 = _mm_cmpeq_epi64(mm3, mm_addr);
+ mm4 = _mm_cmpeq_epi64(mm4, mm_addr);
+ mm5 = _mm_cmpeq_epi64(mm5, mm_addr);
+ mm6 = _mm_cmpeq_epi64(mm6, mm_addr);
+ mm7 = _mm_cmpeq_epi64(mm7, mm_addr);
+ mm8 = _mm_cmpeq_epi64(mm8, mm_addr);
+
+ mm1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+ mm3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(2, 0, 2, 0)));
+ mm5 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm5), _mm_castsi128_ps(mm6), _MM_SHUFFLE(2, 0, 2, 0)));
+ mm7 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm7), _mm_castsi128_ps(mm8), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ mm1 = _mm_packs_epi32(mm1, mm3);
+ mm5 = _mm_packs_epi32(mm5, mm7);
+
+ mm1 = _mm_packs_epi16(mm1, mm5);
+
+ uint32_t mask = _mm_movemask_epi8(mm1);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+ }
+
+ if ((n - pos) >= 8u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+ __m128i mm3 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 4u));
+ __m128i mm4 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 6u));
+
+ mm1 = _mm_cmpeq_epi64(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi64(mm2, mm_addr);
+ mm3 = _mm_cmpeq_epi64(mm3, mm_addr);
+ mm4 = _mm_cmpeq_epi64(mm4, mm_addr);
+
+ mm1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+ mm3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm3), _mm_castsi128_ps(mm4), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ mm1 = _mm_packs_epi32(mm1, mm3);
+
+ uint32_t mask = _mm_movemask_epi8(mm1);
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask) / 2u;
+ goto done;
+ }
+
+ pos += 8u;
+ }
+
+ if ((n - pos) >= 4u)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+ __m128i mm2 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos + 2u));
+
+ mm1 = _mm_cmpeq_epi64(mm1, mm_addr);
+ mm2 = _mm_cmpeq_epi64(mm2, mm_addr);
+
+ mm1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(mm1), _mm_castsi128_ps(mm2), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ uint32_t mask = _mm_movemask_ps(_mm_castsi128_ps(mm1));
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+
+ pos += 4u;
+ }
+
+ if (pos < n)
+ {
+ __m128i mm1 = _mm_load_si128(reinterpret_cast< const __m128i* >(addrs + pos));
+
+ mm1 = _mm_cmpeq_epi64(mm1, mm_addr);
+ uint32_t mask = _mm_movemask_pd(_mm_castsi128_pd(mm1));
+ if (mask)
+ {
+ pos += atomics::detail::count_trailing_zeros(mask);
+ goto done;
+ }
+
+ pos += 2u;
+ }
+
+done:
+ return pos;
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8)
diff --git a/contrib/restricted/boost/atomic/src/lock_pool.cpp b/contrib/restricted/boost/atomic/src/lock_pool.cpp
new file mode 100644
index 0000000000..7424338997
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/lock_pool.cpp
@@ -0,0 +1,1414 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013-2014, 2020 Andrey Semashev
+ */
+/*!
+ * \file lock_pool.cpp
+ *
+ * This file contains the implementation of the lock pool used to emulate atomic ops.
+ */
+
+#include <boost/predef/os/windows.h>
+#if BOOST_OS_WINDOWS
+// Include boost/winapi/config.hpp first to make sure target Windows version is selected by Boost.WinAPI
+#include <boost/winapi/config.hpp>
+#include <boost/predef/platform.h>
+#endif
+#include <boost/predef/architecture/x86.h>
+#include <boost/predef/hardware/simd/x86.h>
+
+#include <cstddef>
+#include <cstring>
+#include <cstdlib>
+#include <new>
+#include <limits>
+#include <boost/config.hpp>
+#include <boost/assert.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/aligned_variable.hpp>
+#include <boost/atomic/detail/core_operations.hpp>
+#include <boost/atomic/detail/extra_operations.hpp>
+#include <boost/atomic/detail/fence_operations.hpp>
+#include <boost/atomic/detail/lock_pool.hpp>
+#include <boost/atomic/detail/pause.hpp>
+#include <boost/atomic/detail/once_flag.hpp>
+#include <boost/atomic/detail/type_traits/alignment_of.hpp>
+
+#include <boost/align/aligned_alloc.hpp>
+
+#include <boost/preprocessor/config/limits.hpp>
+#include <boost/preprocessor/iteration/iterate.hpp>
+
+#if BOOST_OS_WINDOWS
+#include <boost/winapi/basic_types.hpp>
+#include <boost/winapi/thread.hpp>
+#include <boost/winapi/wait_constants.hpp>
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+#include <boost/winapi/srw_lock.hpp>
+#include <boost/winapi/condition_variable.hpp>
+#else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+#include <boost/winapi/critical_section.hpp>
+#include <boost/winapi/semaphore.hpp>
+#include <boost/winapi/handles.hpp>
+#include <boost/winapi/wait.hpp>
+#endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+#define BOOST_ATOMIC_USE_WINAPI
+#else // BOOST_OS_WINDOWS
+#include <boost/atomic/detail/futex.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
+#define BOOST_ATOMIC_USE_FUTEX
+#else // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
+#include <pthread.h>
+#define BOOST_ATOMIC_USE_PTHREAD
+#endif // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
+#include <cerrno>
+#endif // BOOST_OS_WINDOWS
+
+#include "find_address.hpp"
+
+#if BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && \
+ (\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 && BOOST_HW_SIMD_X86 < BOOST_HW_SIMD_X86_SSE4_1_VERSION) || \
+ (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4 && BOOST_HW_SIMD_X86 < BOOST_HW_SIMD_X86_SSE2_VERSION) \
+ )
+#include "cpuid.hpp"
+#define BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH
+#endif
+
+#include <boost/atomic/detail/header.hpp>
+
+// Cache line size, in bytes
+// NOTE: This constant is defined as a macro because some compilers (gcc 4.4, for one) don't allow enums or namespace scope constants in alignment attributes
+#if defined(__s390__) || defined(__s390x__)
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 256
+#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 128
+#else
+#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! \c find_address generic implementation
+std::size_t find_address_generic(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+ for (std::size_t i = 0u; i < size; ++i)
+ {
+ if (addrs[i] == addr)
+ return i;
+ }
+
+ return size;
+}
+
+namespace lock_pool {
+
+namespace {
+
+#if BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+
+typedef atomics::detail::core_operations< sizeof(find_address_t*), false, false > func_ptr_operations;
+BOOST_STATIC_ASSERT_MSG(func_ptr_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for function pointers");
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
+std::size_t find_address_dispatch(const volatile void* addr, const volatile void* const* addrs, std::size_t size);
+#endif
+
+union find_address_ptr
+{
+ find_address_t* as_ptr;
+ func_ptr_operations::storage_type as_storage;
+}
+g_find_address =
+{
+#if defined(BOOST_ATOMIC_USE_SSE41) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 && BOOST_HW_SIMD_X86 >= BOOST_HW_SIMD_X86_SSE4_1_VERSION
+ &find_address_sse41
+#elif defined(BOOST_ATOMIC_USE_SSE2) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4 && BOOST_HW_SIMD_X86 >= BOOST_HW_SIMD_X86_SSE2_VERSION
+ &find_address_sse2
+#else
+ &find_address_dispatch
+#endif
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
+
+std::size_t find_address_dispatch(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+ find_address_t* find_addr = &find_address_generic;
+
+#if defined(BOOST_ATOMIC_USE_SSE2)
+ // First, check the max available cpuid function
+ uint32_t eax = 0u, ebx = 0u, ecx = 0u, edx = 0u;
+ atomics::detail::cpuid(eax, ebx, ecx, edx);
+
+ const uint32_t max_cpuid_function = eax;
+ if (max_cpuid_function >= 1u)
+ {
+ // Obtain CPU features
+ eax = 1u;
+ ebx = ecx = edx = 0u;
+ atomics::detail::cpuid(eax, ebx, ecx, edx);
+
+ if ((edx & (1u << 26)) != 0u)
+ find_addr = &find_address_sse2;
+
+#if defined(BOOST_ATOMIC_USE_SSE41) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
+ if ((ecx & (1u << 19)) != 0u)
+ find_addr = &find_address_sse41;
+#endif
+ }
+#endif // defined(BOOST_ATOMIC_USE_SSE2)
+
+ find_address_ptr ptr = {};
+ ptr.as_ptr = find_addr;
+ func_ptr_operations::store(g_find_address.as_storage, ptr.as_storage, boost::memory_order_relaxed);
+
+ return find_addr(addr, addrs, size);
+}
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
+
+inline std::size_t find_address(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+ find_address_ptr ptr;
+ ptr.as_storage = func_ptr_operations::load(g_find_address.as_storage, boost::memory_order_relaxed);
+ return ptr.as_ptr(addr, addrs, size);
+}
+
+#else // BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+
+inline std::size_t find_address(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
+{
+ return atomics::detail::find_address_generic(addr, addrs, size);
+}
+
+#endif // BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
+
+struct wait_state;
+struct lock_state;
+
+//! Base class for a wait state
+struct wait_state_base
+{
+ //! Number of waiters referencing this state
+ std::size_t m_ref_count;
+ //! Index of this wait state in the list
+ std::size_t m_index;
+
+ explicit wait_state_base(std::size_t index) BOOST_NOEXCEPT :
+ m_ref_count(0u),
+ m_index(index)
+ {
+ }
+
+ BOOST_DELETED_FUNCTION(wait_state_base(wait_state_base const&))
+ BOOST_DELETED_FUNCTION(wait_state_base& operator= (wait_state_base const&))
+};
+
+//! List of wait states. Must be a POD structure.
+struct wait_state_list
+{
+ //! List header
+ struct header
+ {
+ //! List size
+ std::size_t size;
+ //! List capacity
+ std::size_t capacity;
+ };
+
+ /*!
+ * \brief Pointer to the list header
+ *
+ * The list buffer consists of three adjacent areas: header object, array of atomic pointers and array of pointers to the wait_state structures.
+ * Each of the arrays has header.capacity elements, of which the first header.size elements correspond to the currently ongoing wait operations
+ * and the rest are spare elements. Spare wait_state structures may still be allocated (in which case the wait_state pointer is not null) and
+ * can be reused on future requests. Spare atomic pointers are null and unused.
+ *
+ * This memory layout was designed to optimize wait state lookup by atomic address and also support memory pooling to reduce dynamic memory allocations.
+ */
+ header* m_header;
+ //! The flag indicates that memory pooling is disabled. Set on process cleanup.
+ bool m_free_memory;
+
+ //! Buffer alignment, in bytes
+ static BOOST_CONSTEXPR_OR_CONST std::size_t buffer_alignment = 16u;
+ //! Alignment of pointer arrays in the buffer, in bytes. This should align atomic pointers to the vector size used in the \c find_address implementation.
+ static BOOST_CONSTEXPR_OR_CONST std::size_t entries_alignment = atomics::detail::alignment_of< void* >::value < 16u ? 16u : atomics::detail::alignment_of< void* >::value;
+ //! Offset from the list header to the beginning of the array of atomic pointers in the buffer, in bytes
+ static BOOST_CONSTEXPR_OR_CONST std::size_t entries_offset = (sizeof(header) + entries_alignment - 1u) & ~static_cast< std::size_t >(entries_alignment - 1u);
+ //! Initial buffer capacity, in elements. This should be at least as large as a vector size used in \c find_address implementation.
+ static BOOST_CONSTEXPR_OR_CONST std::size_t initial_capacity = (16u / sizeof(void*)) < 2u ? 2u : (16u / sizeof(void*));
+
+ //! Returns a pointer to the array of atomic pointers
+ static const volatile void** get_atomic_pointers(header* p) BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(p != NULL);
+ return reinterpret_cast< const volatile void** >(reinterpret_cast< unsigned char* >(p) + entries_offset);
+ }
+
+ //! Returns a pointer to the array of atomic pointers
+ const volatile void** get_atomic_pointers() const BOOST_NOEXCEPT
+ {
+ return get_atomic_pointers(m_header);
+ }
+
+ //! Returns a pointer to the array of pointers to the wait states
+ static wait_state** get_wait_states(const volatile void** ptrs, std::size_t capacity) BOOST_NOEXCEPT
+ {
+ return reinterpret_cast< wait_state** >(const_cast< void** >(ptrs + capacity));
+ }
+
+ //! Returns a pointer to the array of pointers to the wait states
+ static wait_state** get_wait_states(header* p) BOOST_NOEXCEPT
+ {
+ return get_wait_states(get_atomic_pointers(p), p->capacity);
+ }
+
+ //! Returns a pointer to the array of pointers to the wait states
+ wait_state** get_wait_states() const BOOST_NOEXCEPT
+ {
+ return get_wait_states(m_header);
+ }
+
+ //! Finds an element with the given pointer to the atomic object
+ wait_state* find(const volatile void* addr) const BOOST_NOEXCEPT
+ {
+ wait_state* ws = NULL;
+ if (BOOST_LIKELY(m_header != NULL))
+ {
+ const volatile void* const* addrs = get_atomic_pointers();
+ const std::size_t size = m_header->size;
+ std::size_t pos = find_address(addr, addrs, size);
+ if (pos < size)
+ ws = get_wait_states()[pos];
+ }
+
+ return ws;
+ }
+
+ //! Finds an existing element with the given pointer to the atomic object or allocates a new one. Returns NULL in case of failure.
+ wait_state* find_or_create(const volatile void* addr) BOOST_NOEXCEPT;
+ //! Releases the previously created wait state
+ void erase(wait_state* w) BOOST_NOEXCEPT;
+
+ //! Deallocates spare entries and the list buffer if no allocated entries are left
+ void free_spare() BOOST_NOEXCEPT;
+ //! Allocates new buffer for the list entries. Returns NULL in case of failure.
+ static header* allocate_buffer(std::size_t new_capacity, header* old_header = NULL) BOOST_NOEXCEPT;
+};
+
+#define BOOST_ATOMIC_WAIT_STATE_LIST_INIT { NULL, false }
+
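// [Editorial sketch, not part of the patch] The single-buffer layout decoded
// by the accessors above, assuming a 64-bit target (sizeof(void*) == 8, so
// entries_alignment == 16 and entries_offset == 16):
//
//   offset 0:                 header { size, capacity }
//   offset entries_offset:    const volatile void*[capacity]  // atomic addresses
//   offset entries_offset
//     + capacity * 8:         wait_state*[capacity]           // matching states
//
// A hit at index i in the first array selects index i in the second, which is
// why the two arrays share one capacity. The total allocation would then be:

inline std::size_t wait_state_list_buffer_bytes(std::size_t capacity) // hypothetical helper
{
    return wait_state_list::entries_offset +
        capacity * (sizeof(const volatile void*) + sizeof(wait_state*));
}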
+// In the platform-specific definitions below, lock_state must be a POD structure and wait_state must derive from wait_state_base.
+
+#if defined(BOOST_ATOMIC_USE_PTHREAD)
+
+//! State of a wait operation associated with an atomic object
+struct wait_state :
+ public wait_state_base
+{
+ //! Condition variable
+ pthread_cond_t m_cond;
+
+ explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
+ wait_state_base(index)
+ {
+ BOOST_VERIFY(pthread_cond_init(&m_cond, NULL) == 0);
+ }
+
+ ~wait_state() BOOST_NOEXCEPT
+ {
+ pthread_cond_destroy(&m_cond);
+ }
+
+ //! Blocks in the wait operation until notified
+ void wait(lock_state& state) BOOST_NOEXCEPT;
+
+ //! Wakes up one thread blocked in the wait operation
+ void notify_one(lock_state&) BOOST_NOEXCEPT
+ {
+ BOOST_VERIFY(pthread_cond_signal(&m_cond) == 0);
+ }
+ //! Wakes up all threads blocked in the wait operation
+ void notify_all(lock_state&) BOOST_NOEXCEPT
+ {
+ BOOST_VERIFY(pthread_cond_broadcast(&m_cond) == 0);
+ }
+};
+
+//! Lock pool entry
+struct lock_state
+{
+ //! Mutex
+ pthread_mutex_t m_mutex;
+ //! Wait states
+ wait_state_list m_wait_states;
+
+ //! Locks the mutex for a short duration
+ void short_lock() BOOST_NOEXCEPT
+ {
+ long_lock();
+ }
+
+ //! Locks the mutex for a long duration
+ void long_lock() BOOST_NOEXCEPT
+ {
+ for (unsigned int i = 0u; i < 5u; ++i)
+ {
+ if (BOOST_LIKELY(pthread_mutex_trylock(&m_mutex) == 0))
+ return;
+
+ atomics::detail::pause();
+ }
+
+ BOOST_VERIFY(pthread_mutex_lock(&m_mutex) == 0);
+ }
+
+ //! Unlocks the mutex
+ void unlock() BOOST_NOEXCEPT
+ {
+ BOOST_VERIFY(pthread_mutex_unlock(&m_mutex) == 0);
+ }
+};
+
+#define BOOST_ATOMIC_LOCK_STATE_INIT { PTHREAD_MUTEX_INITIALIZER, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+
+//! Blocks in the wait operation until notified
+inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
+{
+ BOOST_VERIFY(pthread_cond_wait(&m_cond, &state.m_mutex) == 0);
+}
+
+#elif defined(BOOST_ATOMIC_USE_FUTEX)
+
+typedef atomics::detail::core_operations< 4u, false, false > futex_operations;
+// The storage type must be a 32-bit object, as required by futex API
+BOOST_STATIC_ASSERT_MSG(futex_operations::is_always_lock_free && sizeof(futex_operations::storage_type) == 4u, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
+typedef atomics::detail::extra_operations< futex_operations, futex_operations::storage_size, futex_operations::is_signed > futex_extra_operations;
+
+namespace mutex_bits {
+
+//! The bit indicates a locked mutex
+BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type locked = 1u;
+//! The bit indicates that there is at least one thread blocked waiting for the mutex to be released
+BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type contended = 1u << 1;
+//! The lowest bit of the counter bits used to mitigate the ABA problem. This and any higher bits in the mutex state constitute the counter.
+BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type counter_one = 1u << 2;
+
+} // namespace mutex_bits
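// [Editorial sketch, not part of the patch] The futex word layout these bits
// produce, and the transitions implemented by long_lock()/unlock() below:
//
//   bit 0 (locked):    acquired by CAS in long_lock()/lock_slow_path(),
//                      cleared in unlock()
//   bit 1 (contended): set by a sleeper before futex_wait_private(), so that
//                      unlock() knows it must call futex_signal_private()
//   bits 2+ (counter): incremented by counter_one on every unlock(); a
//                      sleeper whose futex word changed in the meantime fails
//                      futex_wait_private() and retries, which is what
//                      mitigates the ABA problem mentioned above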
+
+//! State of a wait operation associated with an atomic object
+struct wait_state :
+ public wait_state_base
+{
+ //! Condition variable futex. Used as the counter of notify calls.
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_cond);
+ //! Number of currently blocked waiters
+ futex_operations::storage_type m_waiter_count;
+
+ explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
+ wait_state_base(index),
+ m_cond(0u),
+ m_waiter_count(0u)
+ {
+ }
+
+ //! Blocks in the wait operation until notified
+ void wait(lock_state& state) BOOST_NOEXCEPT;
+
+ //! Wakes up one thread blocked in the wait operation
+ void notify_one(lock_state& state) BOOST_NOEXCEPT;
+ //! Wakes up all threads blocked in the wait operation
+ void notify_all(lock_state& state) BOOST_NOEXCEPT;
+};
+
+//! Lock pool entry
+struct lock_state
+{
+ //! Mutex futex
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_mutex);
+ //! Wait states
+ wait_state_list m_wait_states;
+
+ //! Locks the mutex for a short duration
+ void short_lock() BOOST_NOEXCEPT
+ {
+ long_lock();
+ }
+
+ //! Locks the mutex for a long duration
+ void long_lock() BOOST_NOEXCEPT
+ {
+ for (unsigned int i = 0u; i < 10u; ++i)
+ {
+ futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
+ if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
+ {
+ futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
+ if (BOOST_LIKELY(futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
+ return;
+ }
+
+ atomics::detail::pause();
+ }
+
+ lock_slow_path();
+ }
+
+ //! Locks the mutex for a long duration
+ void lock_slow_path() BOOST_NOEXCEPT
+ {
+ futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
+ while (true)
+ {
+ if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
+ {
+ futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
+ if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
+ return;
+ }
+ else
+ {
+ futex_operations::storage_type new_state = prev_state | mutex_bits::contended;
+ if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed)))
+ {
+ atomics::detail::futex_wait_private(&m_mutex, new_state);
+ prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
+ }
+ }
+ }
+ }
+
+ //! Unlocks the mutex
+ void unlock() BOOST_NOEXCEPT
+ {
+ futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
+ futex_operations::storage_type new_state;
+ while (true)
+ {
+ new_state = (prev_state & (~mutex_bits::locked)) + mutex_bits::counter_one;
+ if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_release, boost::memory_order_relaxed)))
+ break;
+ }
+
+ if ((prev_state & mutex_bits::contended) != 0u)
+ {
+ int woken_count = atomics::detail::futex_signal_private(&m_mutex);
+ if (woken_count == 0)
+ {
+ prev_state = new_state;
+ new_state &= ~mutex_bits::contended;
+ futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed);
+ }
+ }
+ }
+};
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+#define BOOST_ATOMIC_LOCK_STATE_INIT { 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+#else
+#define BOOST_ATOMIC_LOCK_STATE_INIT { { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+#endif
+
+//! Blocks in the wait operation until notified
+inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
+{
+ const futex_operations::storage_type prev_cond = m_cond;
+ ++m_waiter_count;
+
+ state.unlock();
+
+ while (true)
+ {
+ int err = atomics::detail::futex_wait_private(&m_cond, prev_cond);
+ if (BOOST_LIKELY(err != EINTR))
+ break;
+ }
+
+ state.long_lock();
+
+ --m_waiter_count;
+}
+
+//! Wakes up one thread blocked in the wait operation
+inline void wait_state::notify_one(lock_state& state) BOOST_NOEXCEPT
+{
+ ++m_cond;
+
+ if (BOOST_LIKELY(m_waiter_count > 0u))
+ {
+ // Move one blocked thread to the mutex futex and mark the mutex contended so that the thread is unblocked on unlock()
+ atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u, 1u);
+ futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
+ }
+}
+
+//! Wakes up all threads blocked in the wait operation
+inline void wait_state::notify_all(lock_state& state) BOOST_NOEXCEPT
+{
+ ++m_cond;
+
+ if (BOOST_LIKELY(m_waiter_count > 0u))
+ {
+ // Move blocked threads to the mutex futex and mark the mutex contended so that a thread is unblocked on unlock()
+ atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u);
+ futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
+ }
+}
+
+#else
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+
+//! State of a wait operation associated with an atomic object
+struct wait_state :
+ public wait_state_base
+{
+ //! Condition variable
+ boost::winapi::CONDITION_VARIABLE_ m_cond;
+
+ explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
+ wait_state_base(index)
+ {
+ boost::winapi::InitializeConditionVariable(&m_cond);
+ }
+
+ //! Blocks in the wait operation until notified
+ void wait(lock_state& state) BOOST_NOEXCEPT;
+
+ //! Wakes up one thread blocked in the wait operation
+ void notify_one(lock_state&) BOOST_NOEXCEPT
+ {
+ boost::winapi::WakeConditionVariable(&m_cond);
+ }
+ //! Wakes up all threads blocked in the wait operation
+ void notify_all(lock_state&) BOOST_NOEXCEPT
+ {
+ boost::winapi::WakeAllConditionVariable(&m_cond);
+ }
+};
+
+//! Lock pool entry
+struct lock_state
+{
+ //! Mutex
+ boost::winapi::SRWLOCK_ m_mutex;
+ //! Wait states
+ wait_state_list m_wait_states;
+
+ //! Locks the mutex for a short duration
+ void short_lock() BOOST_NOEXCEPT
+ {
+ long_lock();
+ }
+
+ //! Locks the mutex for a long duration
+ void long_lock() BOOST_NOEXCEPT
+ {
+ // Presumably, AcquireSRWLockExclusive already implements spinning internally, so there's no point in doing this ourselves.
+ boost::winapi::AcquireSRWLockExclusive(&m_mutex);
+ }
+
+ //! Unlocks the mutex
+ void unlock() BOOST_NOEXCEPT
+ {
+ boost::winapi::ReleaseSRWLockExclusive(&m_mutex);
+ }
+};
+
+#define BOOST_ATOMIC_LOCK_STATE_INIT { BOOST_WINAPI_SRWLOCK_INIT, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+
+//! Blocks in the wait operation until notified
+inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
+{
+ boost::winapi::SleepConditionVariableSRW(&m_cond, &state.m_mutex, boost::winapi::infinite, 0u);
+}
+
+#else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+
+typedef atomics::detail::core_operations< 4u, false, false > mutex_operations;
+BOOST_STATIC_ASSERT_MSG(mutex_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
+
+namespace fallback_mutex_bits {
+
+//! The bit indicates a locked mutex
+BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type locked = 1u;
+//! The bit indicates that the critical section is initialized and should be used instead of the fallback mutex
+BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type critical_section_initialized = 1u << 1;
+
+} // namespace fallback_mutex_bits
+
+//! State of a wait operation associated with an atomic object
+struct wait_state :
+ public wait_state_base
+{
+ /*!
+ * \brief A semaphore used to block one or more threads
+ *
+ * A semaphore can be used to block a thread if it has no ongoing notifications (i.e. \c m_notify_count is 0).
+ * If there is no such semaphore, the thread has to allocate a new one to block on. This is to guarantee
+ * that a thread that is blocked after a notification is not immediately released by the semaphore while
+ * there are previously blocked threads.
+ *
+ * Semaphores are organized in a circular doubly linked list. A single semaphore object represents a list
+ * of one semaphore and is said to be "singular".
+ */
+ struct semaphore
+ {
+ //! Pointer to the next semaphore in the list
+ semaphore* m_next;
+ //! Pointer to the previous semaphore in the list
+ semaphore* m_prev;
+
+ //! Semaphore handle
+ boost::winapi::HANDLE_ m_semaphore;
+ //! Number of threads blocked on the semaphore
+ boost::winapi::ULONG_ m_waiter_count;
+ //! Number of threads released by notifications
+ boost::winapi::ULONG_ m_notify_count;
+
+ semaphore() BOOST_NOEXCEPT :
+ m_semaphore(boost::winapi::create_anonymous_semaphore(NULL, 0, (std::numeric_limits< boost::winapi::LONG_ >::max)())),
+ m_waiter_count(0u),
+ m_notify_count(0u)
+ {
+ m_next = m_prev = this;
+ }
+
+ ~semaphore() BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(is_singular());
+
+ if (BOOST_LIKELY(m_semaphore != boost::winapi::invalid_handle_value))
+ boost::winapi::CloseHandle(m_semaphore);
+ }
+
+ //! Creates a new semaphore or returns null in case of failure
+ static semaphore* create() BOOST_NOEXCEPT
+ {
+ semaphore* p = new (std::nothrow) semaphore();
+ if (BOOST_UNLIKELY(p != NULL && p->m_semaphore == boost::winapi::invalid_handle_value))
+ {
+ delete p;
+ p = NULL;
+ }
+ return p;
+ }
+
+ //! Returns \c true if the semaphore is the single element of the list
+ bool is_singular() const BOOST_NOEXCEPT
+ {
+ return m_next == this /* && m_prev == this */;
+ }
+
+ //! Inserts the semaphore list after the specified other semaphore
+ void link_after(semaphore* that) BOOST_NOEXCEPT
+ {
+ link_before(that->m_next);
+ }
+
+ //! Inserts the semaphore list before the specified other semaphore
+ void link_before(semaphore* that) BOOST_NOEXCEPT
+ {
+ semaphore* prev = that->m_prev;
+ that->m_prev = m_prev;
+ m_prev->m_next = that;
+ m_prev = prev;
+ prev->m_next = this;
+ }
+
+ //! Removes the semaphore from the list
+ void unlink() BOOST_NOEXCEPT
+ {
+ // Load pointers beforehand, in case we are the only element in the list
+ semaphore* next = m_next;
+ semaphore* prev = m_prev;
+ prev->m_next = next;
+ next->m_prev = prev;
+ m_next = m_prev = this;
+ }
+
+ BOOST_DELETED_FUNCTION(semaphore(semaphore const&))
+ BOOST_DELETED_FUNCTION(semaphore& operator= (semaphore const&))
+ };
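+
+ // Illustrative sketch (not part of the patch): linking two singular
+ // semaphores a and b with a.link_before(&b) forms the two-element ring
+ // a <-> b; a.unlink() then restores both to singular rings:
+ //
+ //   semaphore a, b;      // each starts singular: a.m_next == &a, b.m_next == &b
+ //   a.link_before(&b);   // now a.m_next == &b && b.m_next == &a
+ //   a.unlink();          // both singular again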
+
+ //! Doubly linked circular list of semaphores
+ class semaphore_list
+ {
+ private:
+ semaphore* m_head;
+
+ public:
+ semaphore_list() BOOST_NOEXCEPT :
+ m_head(NULL)
+ {
+ }
+
+ //! Returns \c true if the list is empty
+ bool empty() const BOOST_NOEXCEPT
+ {
+ return m_head == NULL;
+ }
+
+ //! Returns the first semaphore in the list
+ semaphore* front() const BOOST_NOEXCEPT
+ {
+ return m_head;
+ }
+
+ //! Returns the first semaphore in the list and leaves the list empty
+ semaphore* eject() BOOST_NOEXCEPT
+ {
+ semaphore* sem = m_head;
+ m_head = NULL;
+ return sem;
+ }
+
+ //! Inserts the semaphore at the beginning of the list
+ void push_front(semaphore* sem) BOOST_NOEXCEPT
+ {
+ if (m_head)
+ sem->link_before(m_head);
+
+ m_head = sem;
+ }
+
+ //! Removes the first semaphore from the beginning of the list
+ semaphore* pop_front() BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(!empty());
+ semaphore* sem = m_head;
+ erase(sem);
+ return sem;
+ }
+
+ //! Removes the semaphore from the list
+ void erase(semaphore* sem) BOOST_NOEXCEPT
+ {
+ if (sem->is_singular())
+ {
+ BOOST_ASSERT(m_head == sem);
+ m_head = NULL;
+ }
+ else
+ {
+ if (m_head == sem)
+ m_head = sem->m_next;
+ sem->unlink();
+ }
+ }
+
+ BOOST_DELETED_FUNCTION(semaphore_list(semaphore_list const&))
+ BOOST_DELETED_FUNCTION(semaphore_list& operator= (semaphore_list const&))
+ };
+
+ //! List of semaphores used for notifying. Here, every semaphore has m_notify_count > 0 && m_waiter_count > 0.
+ semaphore_list m_notify_semaphores;
+ //! List of semaphores used for waiting. Here, every semaphore has m_notify_count == 0 && m_waiter_count > 0.
+ semaphore_list m_wait_semaphores;
+ //! List of free semaphores. Here, every semaphore has m_notify_count == 0 && m_waiter_count == 0.
+ semaphore_list m_free_semaphores;
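+
+ // Informal lifecycle (derived from wait/notify_one below): a semaphore is
+ // taken from m_free_semaphores (or newly allocated) when a thread blocks,
+ // lives in m_wait_semaphores while it only has waiters, moves to
+ // m_notify_semaphores once a notification is posted, and returns to the
+ // wait or free list after the last pending notification is consumed.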
+
+ explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
+ wait_state_base(index)
+ {
+ }
+
+ ~wait_state() BOOST_NOEXCEPT
+ {
+ // All wait and notification operations must have been completed
+ BOOST_ASSERT(m_notify_semaphores.empty());
+ BOOST_ASSERT(m_wait_semaphores.empty());
+
+ semaphore* sem = m_free_semaphores.eject();
+ if (sem)
+ {
+ while (true)
+ {
+ bool was_last = sem->is_singular();
+ semaphore* next = sem->m_next;
+ sem->unlink();
+
+ delete sem;
+
+ if (was_last)
+ break;
+
+ sem = next;
+ }
+ }
+ }
+
+ //! Blocks in the wait operation until notified
+ void wait(lock_state& state) BOOST_NOEXCEPT;
+ //! Fallback implementation of wait
+ void wait_fallback(lock_state& state) BOOST_NOEXCEPT;
+
+ //! Wakes up one thread blocked in the wait operation
+ void notify_one(lock_state&) BOOST_NOEXCEPT
+ {
+ if (m_notify_semaphores.empty())
+ {
+ if (m_wait_semaphores.empty())
+ return;
+
+ // Move the semaphore with waiters to the notify list
+ m_notify_semaphores.push_front(m_wait_semaphores.pop_front());
+ }
+
+ semaphore* sem = m_notify_semaphores.front();
+ ++sem->m_notify_count;
+
+ if (sem->m_notify_count == sem->m_waiter_count)
+ {
+ // Remove this semaphore from the list. The waiter will re-insert it into the waiter or free list once there are no more pending notifications in it.
+ m_notify_semaphores.erase(sem);
+ }
+
+ boost::winapi::ReleaseSemaphore(sem->m_semaphore, 1, NULL);
+ }
+
+ //! Wakes up all threads blocked in the wait operation
+ void notify_all(lock_state&) BOOST_NOEXCEPT
+ {
+ // Combine all notify and wait semaphores into one list
+ semaphore* sem = m_notify_semaphores.eject();
+ if (sem)
+ {
+ if (!m_wait_semaphores.empty())
+ {
+ m_wait_semaphores.eject()->link_before(sem);
+ }
+ }
+ else
+ {
+ sem = m_wait_semaphores.eject();
+ }
+
+ if (sem)
+ {
+ while (true)
+ {
+ bool was_last = sem->is_singular();
+ semaphore* next = sem->m_next;
+ sem->unlink();
+
+ boost::winapi::ULONG_ count = sem->m_waiter_count - sem->m_notify_count;
+ sem->m_notify_count += count;
+
+ boost::winapi::ReleaseSemaphore(sem->m_semaphore, count, NULL);
+
+ if (was_last)
+ break;
+
+ sem = next;
+ }
+ }
+ }
+};
+
+//! Lock pool entry
+struct lock_state
+{
+ //! Mutex
+ boost::winapi::CRITICAL_SECTION_ m_mutex;
+ //! Fallback mutex state. Indicates whether the critical section is initialized and serves as a fallback mutex if the critical section cannot be initialized.
+ BOOST_ATOMIC_DETAIL_ALIGNED_VAR(mutex_operations::storage_alignment, mutex_operations::storage_type, m_mutex_fallback);
+ //! Wait states
+ wait_state_list m_wait_states;
+
+ //! Locks the mutex for a short duration
+ void short_lock() BOOST_NOEXCEPT
+ {
+ long_lock();
+ }
+
+ //! Locks the mutex for a long duration
+ void long_lock() BOOST_NOEXCEPT
+ {
+ mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
+ while (true)
+ {
+ if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
+ {
+ lock_cs:
+ boost::winapi::EnterCriticalSection(&m_mutex);
+ return;
+ }
+
+ while (fallback_state == 0u)
+ {
+ if (!mutex_operations::compare_exchange_weak(m_mutex_fallback, fallback_state, fallback_mutex_bits::locked, boost::memory_order_acquire, boost::memory_order_relaxed))
+ continue;
+
+ if (BOOST_LIKELY(!!boost::winapi::InitializeCriticalSectionAndSpinCount(&m_mutex, 100u)))
+ {
+ mutex_operations::store(m_mutex_fallback, fallback_mutex_bits::critical_section_initialized, boost::memory_order_release);
+ goto lock_cs;
+ }
+
+ // We failed to init the critical section, leave the fallback mutex locked and return
+ return;
+ }
+
+ if (fallback_state == fallback_mutex_bits::locked)
+ {
+ // Wait until the fallback mutex is unlocked
+ boost::winapi::SwitchToThread();
+ fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
+ }
+ }
+ }
+
+ //! Unlocks the mutex
+ void unlock() BOOST_NOEXCEPT
+ {
+ mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
+ if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
+ {
+ boost::winapi::LeaveCriticalSection(&m_mutex);
+ return;
+ }
+
+ mutex_operations::store(m_mutex_fallback, 0u, boost::memory_order_release);
+ }
+};
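+
+// Transition summary for m_mutex_fallback (informal, derived from long_lock/unlock above):
+//   0 -> locked                            via compare_exchange_weak in long_lock
+//   locked -> critical_section_initialized once InitializeCriticalSectionAndSpinCount succeeds
+//   locked -> 0                            via unlock, if initialization failed
+//   critical_section_initialized           is terminal; locking uses the critical section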
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
+#define BOOST_ATOMIC_LOCK_STATE_INIT { {}, 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+#else
+#define BOOST_ATOMIC_LOCK_STATE_INIT { {}, { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
+#endif
+
+//! Blocks in the wait operation until notified
+inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
+{
+ // Find a semaphore to block on
+ semaphore* sem = m_wait_semaphores.front();
+ if (sem)
+ {
+ while (sem->m_waiter_count >= static_cast< boost::winapi::ULONG_ >((std::numeric_limits< boost::winapi::LONG_ >::max)()))
+ {
+ if (sem->m_next == m_wait_semaphores.front())
+ {
+ sem = NULL;
+ break;
+ }
+
+ sem = sem->m_next;
+ }
+ }
+
+ if (!sem)
+ {
+ if (BOOST_LIKELY(!m_free_semaphores.empty()))
+ {
+ sem = m_free_semaphores.pop_front();
+ }
+ else
+ {
+ sem = semaphore::create();
+ if (BOOST_UNLIKELY(!sem))
+ {
+ wait_fallback(state);
+ return;
+ }
+ }
+
+ m_wait_semaphores.push_front(sem);
+ }
+
+ ++sem->m_waiter_count;
+
+ state.unlock();
+
+ boost::winapi::WaitForSingleObject(sem->m_semaphore, boost::winapi::infinite);
+
+ state.long_lock();
+
+ --sem->m_waiter_count;
+
+ if (sem->m_notify_count > 0u)
+ {
+ // This semaphore is either in the notify list or not in a list at all
+ if (--sem->m_notify_count == 0u)
+ {
+ if (!sem->is_singular() || sem == m_notify_semaphores.front())
+ m_notify_semaphores.erase(sem);
+
+ semaphore_list* list = sem->m_waiter_count == 0u ? &m_free_semaphores : &m_wait_semaphores;
+ list->push_front(sem);
+ }
+ }
+ else if (sem->m_waiter_count == 0u)
+ {
+ // Move the semaphore to the free list
+ m_wait_semaphores.erase(sem);
+ m_free_semaphores.push_front(sem);
+ }
+}
+
+//! Fallback implementation of wait
+inline void wait_state::wait_fallback(lock_state& state) BOOST_NOEXCEPT
+{
+ state.unlock();
+
+ boost::winapi::Sleep(0);
+
+ state.long_lock();
+}
+
+#endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+
+#endif
+
+enum
+{
+ tail_size = sizeof(lock_state) % BOOST_ATOMIC_CACHE_LINE_SIZE,
+ padding_size = tail_size > 0 ? BOOST_ATOMIC_CACHE_LINE_SIZE - tail_size : 0u
+};
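+
+// Worked example (illustrative numbers only): if sizeof(lock_state) were 96
+// and BOOST_ATOMIC_CACHE_LINE_SIZE is 64, then tail_size = 32 and
+// padding_size = 32, so each padded_lock_state occupies exactly two cache lines.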
+
+template< unsigned int PaddingSize >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state
+{
+ lock_state state;
+ // The additional padding is needed to avoid false sharing between locks
+ char padding[PaddingSize];
+};
+
+template< >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state< 0u >
+{
+ lock_state state;
+};
+
+typedef padded_lock_state< padding_size > padded_lock_state_t;
+
+#if !defined(BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2)
+#define BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 8
+#endif
+#if (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2) < 0
+#error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is negative"
+#endif
+#define BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE (1ull << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2))
+
+//! Lock pool size. Must be a power of two.
+BOOST_CONSTEXPR_OR_CONST std::size_t lock_pool_size = static_cast< std::size_t >(1u) << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2);
+
+static padded_lock_state_t g_lock_pool[lock_pool_size] =
+{
+#if BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
+#if (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u) > BOOST_PP_LIMIT_ITERATION
+#error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is too large"
+#endif
+#define BOOST_PP_ITERATION_PARAMS_1 (3, (1, (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u), "lock_pool_init256.ipp"))
+#else // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
+#define BOOST_PP_ITERATION_PARAMS_1 (3, (1, BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE, "lock_pool_init1.ipp"))
+#endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
+#include BOOST_PP_ITERATE()
+#undef BOOST_PP_ITERATION_PARAMS_1
+};
+
+//! Pool cleanup function
+void cleanup_lock_pool()
+{
+ for (std::size_t i = 0u; i < lock_pool_size; ++i)
+ {
+ lock_state& state = g_lock_pool[i].state;
+ state.long_lock();
+ state.m_wait_states.m_free_memory = true;
+ state.m_wait_states.free_spare();
+ state.unlock();
+ }
+}
+
+BOOST_STATIC_ASSERT_MSG(once_flag_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for bytes");
+static once_flag g_pool_cleanup_registered = {};
+
+//! Returns index of the lock pool entry for the given pointer value
+BOOST_FORCEINLINE std::size_t get_lock_index(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
+{
+ return h & (lock_pool_size - 1u);
+}
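+
+// Since lock_pool_size is a power of two, the masking above is equivalent to
+// h % lock_pool_size. For example, with the default
+// BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 == 8 the pool has 256 entries and
+// get_lock_index(h) == h & 0xFF.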
+
+//! Finds an existing element with the given pointer to the atomic object or allocates a new one
+inline wait_state* wait_state_list::find_or_create(const volatile void* addr) BOOST_NOEXCEPT
+{
+ if (BOOST_UNLIKELY(m_header == NULL))
+ {
+ m_header = allocate_buffer(initial_capacity);
+ if (BOOST_UNLIKELY(m_header == NULL))
+ return NULL;
+ }
+ else
+ {
+ wait_state* ws = this->find(addr);
+ if (BOOST_LIKELY(ws != NULL))
+ return ws;
+
+ if (BOOST_UNLIKELY(m_header->size == m_header->capacity))
+ {
+ header* new_header = allocate_buffer(m_header->capacity * 2u, m_header);
+ if (BOOST_UNLIKELY(new_header == NULL))
+ return NULL;
+ boost::alignment::aligned_free(static_cast< void* >(m_header));
+ m_header = new_header;
+ }
+ }
+
+ const std::size_t index = m_header->size;
+ BOOST_ASSERT(index < m_header->capacity);
+
+ wait_state** pw = get_wait_states() + index;
+ wait_state* w = *pw;
+ if (BOOST_UNLIKELY(w == NULL))
+ {
+ w = new (std::nothrow) wait_state(index);
+ if (BOOST_UNLIKELY(w == NULL))
+ return NULL;
+ *pw = w;
+ }
+
+ get_atomic_pointers()[index] = addr;
+
+ ++m_header->size;
+
+ return w;
+}
+
+//! Releases the previously created wait state
+inline void wait_state_list::erase(wait_state* w) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(m_header != NULL);
+
+ const volatile void** pa = get_atomic_pointers();
+ wait_state** pw = get_wait_states();
+
+ std::size_t index = w->m_index;
+
+ BOOST_ASSERT(index < m_header->size);
+ BOOST_ASSERT(pw[index] == w);
+
+ std::size_t last_index = m_header->size - 1u;
+
+ if (index != last_index)
+ {
+ pa[index] = pa[last_index];
+ pa[last_index] = NULL;
+
+ wait_state* last_w = pw[last_index];
+ pw[index] = last_w;
+ pw[last_index] = w;
+
+ last_w->m_index = index;
+ w->m_index = last_index;
+ }
+ else
+ {
+ pa[index] = NULL;
+ }
+
+ --m_header->size;
+
+ if (BOOST_UNLIKELY(m_free_memory))
+ free_spare();
+}
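+
+// The erase above uses the classic swap-with-last technique (illustration with
+// hypothetical indices): erasing index 2 of a 5-entry list moves entry 4 into
+// slot 2, fixes up both m_index fields, and shrinks the size to 4. The erased
+// wait_state object itself is parked in the vacated last slot as a spare for
+// later reuse, so erase is O(1) and performs no deallocation.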
+
+//! Allocates new buffer for the list entries
+wait_state_list::header* wait_state_list::allocate_buffer(std::size_t new_capacity, header* old_header) BOOST_NOEXCEPT
+{
+ if (BOOST_UNLIKELY(once_flag_operations::load(g_pool_cleanup_registered.m_flag, boost::memory_order_relaxed) == 0u))
+ {
+ if (once_flag_operations::exchange(g_pool_cleanup_registered.m_flag, 1u, boost::memory_order_relaxed) == 0u)
+ std::atexit(&cleanup_lock_pool);
+ }
+
+ const std::size_t new_buffer_size = entries_offset + new_capacity * sizeof(void*) * 2u;
+
+ void* p = boost::alignment::aligned_alloc(buffer_alignment, new_buffer_size);
+ if (BOOST_UNLIKELY(p == NULL))
+ return NULL;
+
+ header* h = new (p) header;
+ const volatile void** a = new (get_atomic_pointers(h)) const volatile void*[new_capacity];
+ wait_state** w = new (get_wait_states(a, new_capacity)) wait_state*[new_capacity];
+
+ if (BOOST_LIKELY(old_header != NULL))
+ {
+ BOOST_ASSERT(new_capacity >= old_header->capacity);
+
+ h->size = old_header->size;
+
+ const volatile void** old_a = get_atomic_pointers(old_header);
+ std::memcpy(a, old_a, old_header->size * sizeof(const volatile void*));
+ std::memset(a + old_header->size, 0, (new_capacity - old_header->size) * sizeof(const volatile void*));
+
+ wait_state** old_w = get_wait_states(old_a, old_header->capacity);
+ std::memcpy(w, old_w, old_header->capacity * sizeof(wait_state*)); // copy spare wait state pointers
+ std::memset(w + old_header->capacity, 0, (new_capacity - old_header->capacity) * sizeof(wait_state*));
+ }
+ else
+ {
+ std::memset(p, 0, new_buffer_size);
+ }
+
+ h->capacity = new_capacity;
+
+ return h;
+}
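+
+// Resulting buffer layout (informal diagram, inferred from entries_offset and
+// the placement-new expressions above):
+//
+//   [ header | const volatile void* atomic_pointers[capacity] | wait_state* wait_states[capacity] ]
+//              ^ get_atomic_pointers(h)                         ^ get_wait_states(a, capacity)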
+
+//! Deallocates spare entries and the list buffer if no allocated entries are left
+void wait_state_list::free_spare() BOOST_NOEXCEPT
+{
+ if (BOOST_LIKELY(m_header != NULL))
+ {
+ wait_state** ws = get_wait_states();
+ for (std::size_t i = m_header->size, n = m_header->capacity; i < n; ++i)
+ {
+ wait_state* w = ws[i];
+ if (!w)
+ break;
+
+ delete w;
+ ws[i] = NULL;
+ }
+
+ if (m_header->size == 0u)
+ {
+ boost::alignment::aligned_free(static_cast< void* >(m_header));
+ m_header = NULL;
+ }
+ }
+}
+
+} // namespace
+
+
+BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
+{
+ lock_state& ls = g_lock_pool[get_lock_index(h)].state;
+ ls.short_lock();
+ return &ls;
+}
+
+BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
+{
+ lock_state& ls = g_lock_pool[get_lock_index(h)].state;
+ ls.long_lock();
+ return &ls;
+}
+
+BOOST_ATOMIC_DECL void unlock(void* vls) BOOST_NOEXCEPT
+{
+ static_cast< lock_state* >(vls)->unlock();
+}
+
+
+BOOST_ATOMIC_DECL void* allocate_wait_state(void* vls, const volatile void* addr) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(vls != NULL);
+
+ lock_state* ls = static_cast< lock_state* >(vls);
+
+ // Note: find_or_create may fail to allocate memory. However, C++20 specifies that wait/notify operations
+ // are noexcept, so allocate_wait_state must not fail. To implement this, we return NULL in case of failure
+ // and test for NULL in the other wait/notify functions, so that all of them become a no-op (which is
+ // conforming, though inefficient, behavior).
+ wait_state* ws = ls->m_wait_states.find_or_create(addr);
+
+ if (BOOST_LIKELY(ws != NULL))
+ ++ws->m_ref_count;
+
+ return ws;
+}
+
+BOOST_ATOMIC_DECL void free_wait_state(void* vls, void* vws) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(vls != NULL);
+
+ wait_state* ws = static_cast< wait_state* >(vws);
+ if (BOOST_LIKELY(ws != NULL))
+ {
+ if (--ws->m_ref_count == 0u)
+ {
+ lock_state* ls = static_cast< lock_state* >(vls);
+ ls->m_wait_states.erase(ws);
+ }
+ }
+}
+
+BOOST_ATOMIC_DECL void wait(void* vls, void* vws) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(vls != NULL);
+
+ lock_state* ls = static_cast< lock_state* >(vls);
+ wait_state* ws = static_cast< wait_state* >(vws);
+ if (BOOST_LIKELY(ws != NULL))
+ {
+ ws->wait(*ls);
+ }
+ else
+ {
+ // A conforming wait operation must unlock and re-lock the mutex so that a concurrent notify can complete
+ ls->unlock();
+ atomics::detail::wait_some();
+ ls->long_lock();
+ }
+}
+
+BOOST_ATOMIC_DECL void notify_one(void* vls, const volatile void* addr) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(vls != NULL);
+
+ lock_state* ls = static_cast< lock_state* >(vls);
+ wait_state* ws = ls->m_wait_states.find(addr);
+ if (BOOST_LIKELY(ws != NULL))
+ ws->notify_one(*ls);
+}
+
+BOOST_ATOMIC_DECL void notify_all(void* vls, const volatile void* addr) BOOST_NOEXCEPT
+{
+ BOOST_ASSERT(vls != NULL);
+
+ lock_state* ls = static_cast< lock_state* >(vls);
+ wait_state* ws = ls->m_wait_states.find(addr);
+ if (BOOST_LIKELY(ws != NULL))
+ ws->notify_all(*ls);
+}
+
+
+BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT
+{
+#if BOOST_ATOMIC_THREAD_FENCE == 2
+ atomics::detail::fence_operations::thread_fence(memory_order_seq_cst);
+#else
+ // Emulate full fence by locking/unlocking a mutex
+ lock_pool::unlock(lock_pool::short_lock(0u));
+#endif
+}
+
+BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT
+{
+ // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
+#if BOOST_ATOMIC_SIGNAL_FENCE == 2
+ atomics::detail::fence_operations::signal_fence(memory_order_seq_cst);
+#endif
+}
+
+} // namespace lock_pool
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
diff --git a/contrib/restricted/boost/atomic/src/lockpool.cpp b/contrib/restricted/boost/atomic/src/lockpool.cpp
deleted file mode 100644
index a1292fa7bb..0000000000
--- a/contrib/restricted/boost/atomic/src/lockpool.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or copy at
- * http://www.boost.org/LICENSE_1_0.txt)
- *
- * Copyright (c) 2011 Helge Bahmann
- * Copyright (c) 2013-2014 Andrey Semashev
- */
-/*!
- * \file lockpool.cpp
- *
- * This file contains implementation of the lockpool used to emulate atomic ops.
- */
-
-#include <cstddef>
-#include <boost/config.hpp>
-#include <boost/assert.hpp>
-#include <boost/memory_order.hpp>
-#include <boost/atomic/capabilities.hpp>
-
-#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
-#include <boost/atomic/detail/operations_lockfree.hpp>
-#elif !defined(BOOST_HAS_PTHREADS)
-#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
-#else
-#include <pthread.h>
-#define BOOST_ATOMIC_USE_PTHREAD
-#endif
-
-#include <boost/atomic/detail/lockpool.hpp>
-#include <boost/atomic/detail/pause.hpp>
-
-#if defined(BOOST_MSVC)
-#pragma warning(push)
-// 'struct_name' : structure was padded due to __declspec(align())
-#pragma warning(disable: 4324)
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-namespace {
-
-// Cache line size, in bytes
-// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
-#if defined(__s390__) || defined(__s390x__)
-#define BOOST_ATOMIC_CACHE_LINE_SIZE 256
-#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
-#define BOOST_ATOMIC_CACHE_LINE_SIZE 128
-#else
-#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
-#endif
-
-#if defined(BOOST_ATOMIC_USE_PTHREAD)
-typedef pthread_mutex_t lock_type;
-#else
-typedef atomics::detail::operations< 1u, false > lock_operations;
-typedef lock_operations::storage_type lock_type;
-#endif
-
-enum
-{
- padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
- (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
- (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
-};
-
-template< unsigned int PaddingSize >
-struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
-{
- lock_type lock;
- // The additional padding is needed to avoid false sharing between locks
- char padding[PaddingSize];
-};
-
-template< >
-struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
-{
- lock_type lock;
-};
-
-typedef padded_lock< padding_size > padded_lock_t;
-
-static padded_lock_t g_lock_pool[41]
-#if defined(BOOST_ATOMIC_USE_PTHREAD)
-=
-{
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
- { PTHREAD_MUTEX_INITIALIZER }
-}
-#endif
-;
-
-} // namespace
-
-
-#if !defined(BOOST_ATOMIC_USE_PTHREAD)
-
-// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes.
-BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
- m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
-{
- while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
- {
- do
- {
- atomics::detail::pause();
- }
- while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed));
- }
-}
-
-BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
-{
- lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
-}
-
-BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
-
-#else // !defined(BOOST_ATOMIC_USE_PTHREAD)
-
-BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
- m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
-{
- BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
-}
-
-BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
-{
- BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
-}
-
-#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
-
-BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
-{
-#if BOOST_ATOMIC_THREAD_FENCE > 0
- atomics::detail::thread_fence(memory_order_seq_cst);
-#else
- // Emulate full fence by locking/unlocking a mutex
- scoped_lock lock(0);
-#endif
-}
-
-BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
-{
- // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
-#if BOOST_ATOMIC_SIGNAL_FENCE > 0
- atomics::detail::signal_fence(memory_order_seq_cst);
-#endif
-}
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#if defined(BOOST_MSVC)
-#pragma warning(pop)
-#endif
diff --git a/contrib/restricted/boost/atomic/src/x86_vector_tools.hpp b/contrib/restricted/boost/atomic/src/x86_vector_tools.hpp
new file mode 100644
index 0000000000..7d9e33203d
--- /dev/null
+++ b/contrib/restricted/boost/atomic/src/x86_vector_tools.hpp
@@ -0,0 +1,52 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file x86_vector_tools.hpp
+ *
+ * This file contains common tools for x86 vectorization
+ */
+
+#ifndef BOOST_ATOMIC_X86_VECTOR_TOOLS_HPP_INCLUDED_
+#define BOOST_ATOMIC_X86_VECTOR_TOOLS_HPP_INCLUDED_
+
+#include <boost/predef/architecture/x86.h>
+#include <boost/atomic/detail/int_sizes.hpp>
+
+#if BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8)
+
+#include <emmintrin.h>
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/intptr.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#include <boost/atomic/detail/header.hpp>
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+BOOST_FORCEINLINE __m128i mm_set1_epiptr(uintptr_t ptr)
+{
+#if !defined(_MSC_VER) || _MSC_FULL_VER >= 190024210
+ return _mm_set1_epi64x(ptr);
+#else
+ // MSVC prior to 14.0 update 3 doesn't provide _mm_set1_epi64x
+ uint32_t lo = static_cast< uint32_t >(ptr), hi = static_cast< uint32_t >(ptr >> 32);
+ return _mm_set_epi32(hi, lo, hi, lo);
+#endif
+}
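+
+// Usage sketch (illustrative, not part of this header): broadcast a pointer
+// value into both 64-bit lanes of an SSE2 register, e.g. to compare one
+// pointer against two table entries at a time.
+//
+//   void* p = get_some_pointer();   // hypothetical
+//   __m128i v = mm_set1_epiptr(reinterpret_cast< uintptr_t >(p));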
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8)
+
+#endif // BOOST_ATOMIC_X86_VECTOR_TOOLS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/winapi/include/boost/winapi/critical_section.hpp b/contrib/restricted/boost/winapi/include/boost/winapi/critical_section.hpp
new file mode 100644
index 0000000000..ac850d30ed
--- /dev/null
+++ b/contrib/restricted/boost/winapi/include/boost/winapi/critical_section.hpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2010 Vicente J. Botet Escriba
+ * Copyright 2015 Andrey Semashev
+ *
+ * Distributed under the Boost Software License, Version 1.0.
+ * See http://www.boost.org/LICENSE_1_0.txt
+ */
+
+#ifndef BOOST_WINAPI_CRITICAL_SECTION_HPP_INCLUDED_
+#define BOOST_WINAPI_CRITICAL_SECTION_HPP_INCLUDED_
+
+#include <boost/winapi/basic_types.hpp>
+#include <boost/winapi/detail/cast_ptr.hpp>
+#include <boost/winapi/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if !defined( BOOST_USE_WINDOWS_H )
+
+extern "C" {
+#if !defined( BOOST_WINAPI_IS_MINGW )
+
+// Windows CE uses a different name for the structure
+#if defined (_WIN32_WCE)
+struct CRITICAL_SECTION;
+namespace boost {
+namespace winapi {
+namespace detail {
+ typedef CRITICAL_SECTION winsdk_critical_section;
+}
+}
+}
+#else
+struct _RTL_CRITICAL_SECTION;
+namespace boost {
+namespace winapi {
+namespace detail {
+ typedef _RTL_CRITICAL_SECTION winsdk_critical_section;
+}
+}
+}
+#endif
+
+#else
+// MinGW uses a different name for the structure
+struct _CRITICAL_SECTION;
+
+namespace boost {
+namespace winapi {
+namespace detail {
+ typedef _CRITICAL_SECTION winapi_critical_section;
+}
+}
+}
+#endif
+
+#if !defined( BOOST_WINAPI_IS_MINGW )
+
+#if BOOST_WINAPI_PARTITION_APP_SYSTEM
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSection(boost::winapi::detail::winsdk_critical_section* lpCriticalSection);
+#endif
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+EnterCriticalSection(boost::winapi::detail::winsdk_critical_section* lpCriticalSection);
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+LeaveCriticalSection(boost::winapi::detail::winsdk_critical_section* lpCriticalSection);
+
+#if BOOST_USE_WINAPI_VERSION >= 0x0403
+#if BOOST_WINAPI_PARTITION_APP_SYSTEM
+BOOST_WINAPI_IMPORT boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSectionAndSpinCount(
+ boost::winapi::detail::winsdk_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount);
+
+BOOST_WINAPI_IMPORT boost::winapi::DWORD_ BOOST_WINAPI_WINAPI_CC
+SetCriticalSectionSpinCount(
+ boost::winapi::detail::winsdk_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount);
+#endif
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+BOOST_WINAPI_IMPORT boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSectionEx(
+ boost::winapi::detail::winsdk_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount,
+ boost::winapi::DWORD_ Flags);
+#endif
+#endif
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_NT4
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+TryEnterCriticalSection(boost::winapi::detail::winsdk_critical_section* lpCriticalSection);
+#endif
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+DeleteCriticalSection(boost::winapi::detail::winsdk_critical_section* lpCriticalSection);
+
+#else // defined( BOOST_WINAPI_IS_MINGW )
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSection(boost::winapi::detail::winapi_critical_section* lpCriticalSection);
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+EnterCriticalSection(boost::winapi::detail::winapi_critical_section* lpCriticalSection);
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+LeaveCriticalSection(boost::winapi::detail::winapi_critical_section* lpCriticalSection);
+
+#if BOOST_USE_WINAPI_VERSION >= 0x0403
+BOOST_WINAPI_IMPORT boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSectionAndSpinCount(
+ boost::winapi::detail::winapi_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount);
+
+BOOST_WINAPI_IMPORT boost::winapi::DWORD_ BOOST_WINAPI_WINAPI_CC
+SetCriticalSectionSpinCount(
+ boost::winapi::detail::winapi_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount);
+#endif
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+BOOST_WINAPI_IMPORT boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+InitializeCriticalSectionEx(
+ boost::winapi::detail::winapi_critical_section* lpCriticalSection,
+ boost::winapi::DWORD_ dwSpinCount,
+ boost::winapi::DWORD_ Flags);
+#endif
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_NT4
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::BOOL_ BOOST_WINAPI_WINAPI_CC
+TryEnterCriticalSection(boost::winapi::detail::winapi_critical_section* lpCriticalSection);
+#endif
+
+BOOST_WINAPI_IMPORT_EXCEPT_WM boost::winapi::VOID_ BOOST_WINAPI_WINAPI_CC
+DeleteCriticalSection(boost::winapi::detail::winapi_critical_section* lpCriticalSection);
+
+#endif // defined( BOOST_WINAPI_IS_MINGW )
+} // extern "C"
+#endif
+
+namespace boost {
+namespace winapi {
+
+
+#pragma pack(push, 8)
+
+#if !defined(_WIN32_WCE)
+
+struct _RTL_CRITICAL_SECTION_DEBUG;
+
+typedef struct BOOST_MAY_ALIAS _RTL_CRITICAL_SECTION {
+ _RTL_CRITICAL_SECTION_DEBUG* DebugInfo;
+ LONG_ LockCount;
+ LONG_ RecursionCount;
+ HANDLE_ OwningThread;
+ HANDLE_ LockSemaphore;
+ ULONG_PTR_ SpinCount;
+} CRITICAL_SECTION_, *PCRITICAL_SECTION_;
+
+#else
+
+// Windows CE has a different layout
+typedef struct BOOST_MAY_ALIAS CRITICAL_SECTION {
+ unsigned int LockCount;
+ HANDLE OwnerThread;
+ HANDLE hCrit;
+ DWORD needtrap;
+ DWORD dwContentions;
+} CRITICAL_SECTION_, *LPCRITICAL_SECTION_;
+
+#endif
+
+#pragma pack(pop)
+
+#if BOOST_WINAPI_PARTITION_APP_SYSTEM
+BOOST_FORCEINLINE VOID_ InitializeCriticalSection(CRITICAL_SECTION_* lpCriticalSection)
+{
+ ::InitializeCriticalSection(winapi::detail::cast_ptr(lpCriticalSection));
+}
+#endif
+
+BOOST_FORCEINLINE VOID_ EnterCriticalSection(CRITICAL_SECTION_* lpCriticalSection)
+{
+ ::EnterCriticalSection(winapi::detail::cast_ptr(lpCriticalSection));
+}
+
+BOOST_FORCEINLINE VOID_ LeaveCriticalSection(CRITICAL_SECTION_* lpCriticalSection)
+{
+ ::LeaveCriticalSection(winapi::detail::cast_ptr(lpCriticalSection));
+}
+
+#if BOOST_USE_WINAPI_VERSION >= 0x0403
+#if BOOST_WINAPI_PARTITION_APP_SYSTEM
+BOOST_FORCEINLINE BOOL_ InitializeCriticalSectionAndSpinCount(CRITICAL_SECTION_* lpCriticalSection, DWORD_ dwSpinCount)
+{
+ return ::InitializeCriticalSectionAndSpinCount(winapi::detail::cast_ptr(lpCriticalSection), dwSpinCount);
+}
+
+BOOST_FORCEINLINE DWORD_ SetCriticalSectionSpinCount(CRITICAL_SECTION_* lpCriticalSection, DWORD_ dwSpinCount)
+{
+ return ::SetCriticalSectionSpinCount(winapi::detail::cast_ptr(lpCriticalSection), dwSpinCount);
+}
+#endif
+
+// CRITICAL_SECTION_NO_DEBUG_INFO is defined for WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+BOOST_CONSTEXPR_OR_CONST DWORD_ CRITICAL_SECTION_NO_DEBUG_INFO_ = 0x01000000;
+BOOST_CONSTEXPR_OR_CONST DWORD_ CRITICAL_SECTION_FLAG_NO_DEBUG_INFO_ = CRITICAL_SECTION_NO_DEBUG_INFO_;
+BOOST_CONSTEXPR_OR_CONST DWORD_ CRITICAL_SECTION_FLAG_DYNAMIC_SPIN_ = 0x02000000; // undocumented
+BOOST_CONSTEXPR_OR_CONST DWORD_ CRITICAL_SECTION_FLAG_STATIC_INIT_ = 0x04000000; // undocumented
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+BOOST_FORCEINLINE BOOL_ InitializeCriticalSectionEx(CRITICAL_SECTION_* lpCriticalSection, DWORD_ dwSpinCount, DWORD_ Flags)
+{
+ return ::InitializeCriticalSectionEx(winapi::detail::cast_ptr(lpCriticalSection), dwSpinCount, Flags);
+}
+#endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
+#endif // BOOST_USE_WINAPI_VERSION >= 0x0403
+
+#if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_NT4
+BOOST_FORCEINLINE BOOL_ TryEnterCriticalSection(CRITICAL_SECTION_* lpCriticalSection)
+{
+ return ::TryEnterCriticalSection(winapi::detail::cast_ptr(lpCriticalSection));
+}
+#endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_NT4
+
+BOOST_FORCEINLINE VOID_ DeleteCriticalSection(CRITICAL_SECTION_* lpCriticalSection)
+{
+ ::DeleteCriticalSection(winapi::detail::cast_ptr(lpCriticalSection));
+}
+
+}
+}
+
+#include <boost/winapi/detail/footer.hpp>
+
+#endif // BOOST_WINAPI_CRITICAL_SECTION_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/winapi/include/boost/winapi/detail/cast_ptr.hpp b/contrib/restricted/boost/winapi/include/boost/winapi/detail/cast_ptr.hpp
new file mode 100644
index 0000000000..4e089b3b40
--- /dev/null
+++ b/contrib/restricted/boost/winapi/include/boost/winapi/detail/cast_ptr.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 Andrey Semashev
+ *
+ * Distributed under the Boost Software License, Version 1.0.
+ * See http://www.boost.org/LICENSE_1_0.txt
+ */
+
+#ifndef BOOST_WINAPI_DETAIL_CAST_PTR_HPP_INCLUDED_
+#define BOOST_WINAPI_DETAIL_CAST_PTR_HPP_INCLUDED_
+
+#include <boost/winapi/config.hpp>
+#include <boost/winapi/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace winapi {
+namespace detail {
+
+//! This class is used to automatically cast pointers to the type used in the current Windows SDK function declarations
+class cast_ptr
+{
+private:
+ const void* m_p;
+
+public:
+ explicit BOOST_FORCEINLINE cast_ptr(const void* p) BOOST_NOEXCEPT : m_p(p) {}
+ template< typename T >
+ BOOST_FORCEINLINE operator T* () const BOOST_NOEXCEPT { return (T*)m_p; }
+};
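+
+// Usage sketch (illustrative): the winapi wrappers pass a boost::winapi
+// structure pointer to a function declared against the opaque SDK type, and
+// the templated conversion operator deduces the target pointer type:
+//
+//   CRITICAL_SECTION_ cs;
+//   ::EnterCriticalSection(cast_ptr(&cs)); // converts to the SDK's struct pointer type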
+
+}
+}
+}
+
+#include <boost/winapi/detail/footer.hpp>
+
+#endif // BOOST_WINAPI_DETAIL_CAST_PTR_HPP_INCLUDED_