author     alexv-smirnov <alex@ydb.tech>  2022-07-29 15:45:39 +0300
committer  alexv-smirnov <alex@ydb.tech>  2022-07-29 15:45:39 +0300
commit     ab1192e6186daeb546d36ea820f8936ec2c96f9d (patch)
tree       5a71fef0346f2d689c6f769e22940f7b1e44f7a1
parent     debf07d0f4cba16b288221fd3db665d112627b44 (diff)
download   ydb-ab1192e6186daeb546d36ea820f8936ec2c96f9d.tar.gz
add contrib/restricted/boost/atomic/include to ydb sync config
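
These headers are the per-platform backends of Boost.Atomic: the caps_*.hpp headers define capability macros reporting whether atomics of each width are lock-free (0 = never, 1 = sometimes, 2 = always, mirroring the C++11 ATOMIC_*_LOCK_FREE convention), and the ops_*.hpp / extra_ops_*.hpp headers implement the corresponding operations. A minimal usage sketch of how these macros and operations surface to library users (illustrative only, not part of this commit):

    #include <boost/atomic.hpp>
    #include <iostream>

    int main()
    {
        // Reported by the caps_*.hpp header selected for the target platform.
        std::cout << "int32 lock-free capability: "
                  << BOOST_ATOMIC_INT32_LOCK_FREE << '\n';

        boost::atomic<int> counter(0);
        counter.fetch_add(1, boost::memory_order_relaxed); // dispatches to an ops_*.hpp backend
        std::cout << counter.load(boost::memory_order_acquire) << '\n';
        return 0;
    }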
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp  34
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp  39
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp  133
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp  37
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp  34
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp  61
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp  40
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_linux_arm.hpp  35
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp  34
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp  55
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_windows.hpp  33
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp  1111
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp  840
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp  1656
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp  106
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp  1301
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp  67
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp  42
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp  58
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp  522
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp  107
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp  69
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp  876
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp  1397
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp  134
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp  392
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp  1232
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp  70
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp  240
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp  240
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp  563
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp  556
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp  180
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp  824
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp  38
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp  908
-rw-r--r--  contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp  218
37 files changed, 14282 insertions, 0 deletions
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp
new file mode 100644
index 0000000000..861432f58a
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_alpha.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_alpha.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp
new file mode 100644
index 0000000000..a26ea56ee5
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_arm.hpp
@@ -0,0 +1,39 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * ARM Code by Phil Endecott, based on other architectures.
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp
new file mode 100644
index 0000000000..3b518cf49c
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_atomic.hpp
@@ -0,0 +1,133 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_atomic.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/int_sizes.hpp>
+#if defined(__i386__) || defined(__x86_64__)
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#elif defined(__arm__)
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#elif defined(__POWERPC__) || defined(__PPC__)
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
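+// 128-bit atomics require cmpxchg16b plus either a native __int128
+// (BOOST_HAS_INT128) or the ability to enforce 16-byte alignment of the
+// storage (BOOST_NO_ALIGNMENT not defined).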
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_INT128_LOCK_FREE 0
+#endif
+
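+// When the compiler does not report a type as always lock-free, fall back to
+// the capability of the next wider type: a lock-free CAS on a wider type can
+// emulate the narrower type's operations.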
+#if (__GCC_ATOMIC_LLONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8)
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
+#endif
+
+#if (__GCC_ATOMIC_LONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8)
+#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#endif
+
+#if __GCC_ATOMIC_INT_LOCK_FREE == 2
+#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#endif
+
+#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#endif
+
+#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#endif
+
+#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
+#endif
+
+
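+// Map the fixed-width capability macros to whichever fundamental type has the
+// matching size; 0 (never lock-free) if no fundamental type matches.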
+#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT16_LOCK_FREE 0
+#endif
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT32_LOCK_FREE 0
+#endif
+
+#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT64_LOCK_FREE 0
+#endif
+
+
+#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#else
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
+#endif
+
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp
new file mode 100644
index 0000000000..3e20fdee45
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_ppc.hpp
@@ -0,0 +1,37 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_ppc.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp
new file mode 100644
index 0000000000..5806684926
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sparc.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2010 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_sparc.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp
new file mode 100644
index 0000000000..ffbe605a1a
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_sync.hpp
@@ -0,0 +1,61 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_sync.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if defined(__i386__) || defined(__x86_64__)
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#elif defined(__arm__)
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#elif defined(__POWERPC__) || defined(__PPC__)
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
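+// A __sync compare-and-swap at any width not smaller than the type suffices:
+// narrower objects are emulated through a CAS on an enclosing wider word.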
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
+ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp
new file mode 100644
index 0000000000..70c64628af
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_gcc_x86.hpp
@@ -0,0 +1,40 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2013 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_gcc_x86.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
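+// 64-bit operations are native on x86-64; 32-bit x86 needs cmpxchg8b for them.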
+#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_linux_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_linux_arm.hpp
new file mode 100644
index 0000000000..abe6fb81af
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_linux_arm.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009, 2011 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * Linux-specific code by Phil Endecott
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_linux_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp
new file mode 100644
index 0000000000..6b3c61fb3e
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_arm.hpp
@@ -0,0 +1,34 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_msvc_arm.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp
new file mode 100644
index 0000000000..2ee4c92111
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_msvc_x86.hpp
@@ -0,0 +1,55 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_msvc_x86.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
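+// cmpxchg8b was introduced with the Pentium; _M_IX86 >= 500 indicates the
+// compiler targets Pentium or later.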
+#if defined(_M_IX86) && _M_IX86 >= 500
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+// Use mfence only if SSE2 is available
+#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+
+#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_windows.hpp
new file mode 100644
index 0000000000..1cc0ded833
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/caps_windows.hpp
@@ -0,0 +1,33 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_windows.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp
new file mode 100644
index 0000000000..e84f1771da
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_arm.hpp
@@ -0,0 +1,1111 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 - 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_arm.hpp
+ *
+ * This header contains implementation of the extra atomic operations for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
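+// Adapter shared by all sizes: implements the opaque_* and *_and_test
+// operations required of extra_operations in terms of the value-returning
+// operations provided by the Base specialization.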
+template< typename Base >
+struct gcc_arm_extra_operations_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct gcc_arm_extra_operations;
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+template< typename Base, bool Signed >
+struct gcc_arm_extra_operations< Base, 1u, Signed > :
+ public generic_extra_operations< Base, 1u, Signed >
+{
+ typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
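+    // ldrexb/strexb move data through 32-bit registers, so the arithmetic is
+    // performed on a zero-extended 32-bit temporary and truncated back to
+    // storage_type on return.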
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 1u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< typename Base, bool Signed >
+struct gcc_arm_extra_operations< Base, 2u, Signed > :
+ public generic_extra_operations< Base, 2u, Signed >
+{
+ typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 2u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< typename Base, bool Signed >
+struct gcc_arm_extra_operations< Base, 4u, Signed > :
+ public generic_extra_operations< Base, 4u, Signed >
+{
+ typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 4u, Signed > >
+{
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+template< typename Base, bool Signed >
+struct gcc_arm_extra_operations< Base, 8u, Signed > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
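+        // 64-bit negation computed as two's complement (~original + 1) with
+        // the carry propagated into the high word; ldrexd/strexd operate on an
+        // even/odd register pair, and the %H modifier selects the high
+        // register of that pair.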
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "adds %2, %2, #1\n" // result = result + 1
+ "adc %H2, %H2, #0\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "adds %2, %2, #1\n" // result = result + 1
+ "adc %H2, %H2, #0\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "adds %2, %1, %4\n" // result = original + value
+ "adc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "subs %2, %1, %4\n" // result = original - value
+ "sbc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "and %2, %1, %4\n" // result = original & value
+ "and %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "orr %2, %1, %4\n" // result = original | value
+ "orr %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "eor %2, %1, %4\n" // result = original ^ value
+ "eor %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 8u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
new file mode 100644
index 0000000000..dc4bbdbf74
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
@@ -0,0 +1,840 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 - 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_ppc.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for PowerPC.
+ */
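+//
+// Illustrative sketch (an assumption-laden example, not upstream code): the
+// operations below all use the PowerPC load-reserved/store-conditional retry
+// loop (lbarx/lharx/lwarx/ldarx paired with stbcx./sthcx./stwcx./stdcx.) and
+// back the extension members of boost::atomic<T>. Assuming a translation
+// unit linked against Boost.Atomic:
+//
+//   #include <boost/atomic.hpp>
+//
+//   boost::atomic< unsigned int > counter(5u);
+//   // fetch_negate atomically stores the negated value, returns the old one
+//   unsigned int old = counter.fetch_negate(boost::memory_order_acq_rel);
+//   // old == 5u; counter now holds 0u - 5u (i.e. 5u negated modulo 2^32)
+//   counter.opaque_and(0xFFu, boost::memory_order_relaxed); // result discarded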
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct gcc_ppc_extra_operations_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct gcc_ppc_extra_operations;
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+template< typename Base, bool Signed >
+struct gcc_ppc_extra_operations< Base, 1u, Signed > :
+ public generic_extra_operations< Base, 1u, Signed >
+{
+ typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 1u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< typename Base, bool Signed >
+struct gcc_ppc_extra_operations< Base, 2u, Signed > :
+ public generic_extra_operations< Base, 2u, Signed >
+{
+ typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+    public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 2u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< typename Base, bool Signed >
+struct gcc_ppc_extra_operations< Base, 4u, Signed > :
+ public generic_extra_operations< Base, 4u, Signed >
+{
+ typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 4u, Signed > >
+{
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+template< typename Base, bool Signed >
+struct gcc_ppc_extra_operations< Base, 8u, Signed > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 8u, Signed > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp
new file mode 100644
index 0000000000..ee2cd02a88
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_gcc_x86.hpp
@@ -0,0 +1,1656 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2015 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_x86.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for x86.
+ */
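+//
+// Illustrative sketch (an assumption-laden example, not upstream code): on
+// x86 the *_and_test and bit_test_and_* extensions below map to single
+// "lock"-prefixed instructions (lock add/sub/and/or/xor and lock bts/btr/btc).
+// Assuming a translation unit linked against Boost.Atomic:
+//
+//   #include <boost/atomic.hpp>
+//
+//   boost::atomic< unsigned int > flags(0u);
+//   // atomically set bit 3, returning its previous value (lock; bts)
+//   bool was_set = flags.bit_test_and_set(3u, boost::memory_order_acq_rel);
+//   // was_set == false; flags now holds 8u
+//   // sub_and_test returns true iff the resulting value is non-zero
+//   bool nonzero = flags.sub_and_test(8u, boost::memory_order_acq_rel); // false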
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct gcc_x86_extra_operations_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(Base::fetch_add(storage, v, order) + v);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(Base::fetch_sub(storage, v, order) - v);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; bts %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; bts %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btr %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btr %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btc %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btc %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movzbl %[orig], %2\n\t"\
+ op " %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%al, %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movzwl %[orig], %2\n\t"\
+ op " %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%ax, %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[orig], %[res]\n\t"\
+ op " %[res]\n\t"\
+ "lock; cmpxchgl %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", original, result);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", original, result);
+ return result;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[res]\n\t"\
+ op " %%eax, %[res]\n\t"\
+ "lock; cmpxchgl %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : [arg] "ir" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
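+    // A compile-time constant 1 is strength-reduced to the shorter
+    // "lock inc"/"lock dec" encodings; inc and dec leave CF untouched but
+    // still update ZF, which is all the *_and_test helpers below depend on.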
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
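+    // x86 "not" does not modify EFLAGS, so no "cc" clobber is needed here.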
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+#if defined(__x86_64__)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
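+    // In 64-bit mode immediate operands are limited to sign-extended 32-bit
+    // values, hence the "er" constraint used below (such an immediate, or a
+    // register).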
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[orig], %[res]\n\t"\
+ op " %[res]\n\t"\
+ "lock; cmpxchgq %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", original, result);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", original, result);
+ return result;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[res]\n\t"\
+ op " %%rax, %[res]\n\t"\
+ "lock; cmpxchgq %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : [arg] "r" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ "setnz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+#endif // defined(__x86_64__)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp
new file mode 100644
index 0000000000..b8eb5bcb31
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_arm.hpp
@@ -0,0 +1,106 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_msvc_arm.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations< Base, Size, Signed, true > :
+    public generic_extra_operations< Base, Size, Signed >
+{
+    typedef generic_extra_operations< Base, Size, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
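+    // MSVC for ARM offers relaxed/acquire/release (_nf/_acq/_rel) variants of
+    // the interlocked intrinsics; when available, the requested memory_order
+    // is dispatched to the cheapest matching variant instead of always
+    // issuing a full-barrier operation.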
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE)
+ bool result;
+ switch (order)
+ {
+ case memory_order_relaxed:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(&storage, bit_number);
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(&storage, bit_number);
+ break;
+ case memory_order_release:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(&storage, bit_number);
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+ break;
+ }
+ return result;
+#else
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE)
+ bool result;
+ switch (order)
+ {
+ case memory_order_relaxed:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(&storage, bit_number);
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(&storage, bit_number);
+ break;
+ case memory_order_release:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(&storage, bit_number);
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+ break;
+ }
+ return result;
+#else
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+#endif
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp
new file mode 100644
index 0000000000..17451a83d6
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/extra_ops_msvc_x86.hpp
@@ -0,0 +1,1301 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_msvc_x86.hpp
+ *
+ * This header contains the implementation of the extra atomic operations for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// frame pointer register 'ebx' modified by inline assembly code
+#pragma warning(disable: 4731)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
+
+template< typename Base, std::size_t Size, bool Signed >
+struct msvc_x86_extra_operations_common :
+ public generic_extra_operations< Base, Size, Signed >
+{
+ typedef generic_extra_operations< Base, Size, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
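+    // bts/btr copy the addressed bit into CF before setting/clearing it, so
+    // the intrinsic return value (or setc in the asm fallback) yields the
+    // bit's previous state.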
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS)
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+ }
+#else
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock bts [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+ }
+#else
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btr [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btc [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public msvc_x86_extra_operations_common< Base, 1u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
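+    // 32-bit MSVC inline asm only. The cmpxchg retry loops keep the last
+    // observed value in al/ax/eax; movzx widens the narrow operands to full
+    // registers to avoid partial-register stalls.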
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov old_val, al
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov new_val, dl
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ test dl, dl
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ and dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ or dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ xor dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov old_val, al
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov new_val, dl
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ test dl, dl
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg byte ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not byte ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
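+    // The *_and_test helpers report whether the new value is non-zero by
+    // reading ZF immediately after the locked instruction.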
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add byte ptr [edx], al
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub byte ptr [edx], al
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and byte ptr [edx], al
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or byte ptr [edx], al
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor byte ptr [edx], al
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public msvc_x86_extra_operations_common< Base, 2u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov old_val, ax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov new_val, dx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ test dx, dx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ and dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ or dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ xor dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov old_val, ax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov new_val, dx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ test dx, dx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg word ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not word ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add word ptr [edx], ax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub word ptr [edx], ax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and word ptr [edx], ax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or word ptr [edx], ax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor word ptr [edx], ax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public msvc_x86_extra_operations_common< Base, 4u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov old_val, eax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov new_val, edx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ test edx, edx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ and edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ or edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ xor edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov old_val, eax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov new_val, edx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ test edx, edx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock add dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock sub dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg dword ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock and dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock or dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock xor dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not dword ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock add dword ptr [edx], eax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock sub dword ptr [edx], eax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock and dword ptr [edx], eax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock or dword ptr [edx], eax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock xor dword ptr [edx], eax
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+#endif // defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS64) && defined(BOOST_ATOMIC_INTERLOCKED_BTR64)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
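+    // These rely on the 64-bit interlocked bit-test intrinsics, which MSVC
+    // provides only when targeting 64-bit platforms.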
+    static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS64(&storage, bit_number);
+ }
+
+    static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR64(&storage, bit_number);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS64) && defined(BOOST_ATOMIC_INTERLOCKED_BTR64)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp
new file mode 100644
index 0000000000..6d0c338622
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_arm.hpp
@@ -0,0 +1,67 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_arm.hpp
+ *
+ * This header defines hardware capability macros for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+
+#if BOOST_ATOMIC_DETAIL_ARM_ARCH > 6
+// ARMv7 and later have dmb instruction
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
+#endif
+
+#if defined(__ARM_FEATURE_LDREX)
+
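+// Per the ARM C Language Extensions, __ARM_FEATURE_LDREX is a bit mask:
+// bit 0 - byte, bit 1 - halfword, bit 3 - doubleword exclusive accesses.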
+#if (__ARM_FEATURE_LDREX & 1)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
+#endif
+#if (__ARM_FEATURE_LDREX & 2)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
+#endif
+#if (__ARM_FEATURE_LDREX & 8)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
+#endif
+
+#else // defined(__ARM_FEATURE_LDREX)
+
+#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
+
+// ARMv6k and ARMv7 have 8 and 16-bit ldrex/strex variants, but at least GCC 4.7 fails to compile them. GCC 4.9 is known to work.
+#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
+#endif
+
+#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7M__))
+// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.
+// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,
+// which is required for ldrexd/strexd instructions, so we disable 64-bit support. When targeting ARMv6k ARM
+// or ARMv7 (both ARM and Thumb 2) it works as expected.
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
+#endif
+
+#endif // !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
+
+#endif // defined(__ARM_FEATURE_LDREX)
+
+#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp
new file mode 100644
index 0000000000..2ec1e327a7
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_ppc.hpp
@@ -0,0 +1,42 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_ppc.hpp
+ *
+ * This header defines hardware capability macros for PowerPC.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__POWERPC__) || defined(__PPC__)
+
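+// lbarx/lharx/ldarx/lqarx and the matching st*cx. instructions are PowerPC
+// load-and-reserve / store-conditional pairs of the respective widths.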
+#if defined(_ARCH_PWR8)
+// Power8 and later architectures have 8 and 16-bit instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX
+#endif
+
+#if defined(__powerpc64__) || defined(__PPC64__)
+// Power7 and later architectures in 64-bit mode have 64-bit instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX
+#if defined(_ARCH_PWR8)
+// Power8 also has 128-bit instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LQARX_STQCX
+#endif
+#endif
+
+#endif // defined(__POWERPC__) || defined(__PPC__)
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp
new file mode 100644
index 0000000000..91a1aee3aa
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/hwcaps_gcc_x86.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_x86.hpp
+ *
+ * This header defines hardware capability macros for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__GNUC__)
+
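+// cmpxchg8b first appeared on the Pentium (i586), so either the compiler's
+// __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 hint or an i586-or-later target
+// selection implies its availability.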
+#if defined(__i386__) &&\
+ (\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
+ defined(__i586__) || defined(__i686__) || defined(__SSE__)\
+ )
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if defined(__x86_64__) || defined(__SSE2__)
+// Use mfence only if SSE2 is available
+#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
+#endif
+
+#else // defined(__GNUC__)
+
+#if defined(__i386__) && !defined(BOOST_ATOMIC_NO_CMPXCHG8B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if !defined(BOOST_ATOMIC_NO_MFENCE)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
+#endif
+
+#endif // defined(__GNUC__)
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
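A sketch of how the mfence capability is typically consumed downstream: a full barrier uses the SSE2 instruction when available, and otherwise falls back to a locked read-modify-write, which is fully serializing on every IA-32 implementation. Illustrative only, in the spirit of ops_gcc_x86.hpp:

```cpp
// Sketch: full memory fence keyed off the macro above. The locked add to
// a stack slot is the classic pre-SSE2 substitute for mfence.
inline void full_fence()
{
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
    __asm__ __volatile__ ("mfence" ::: "memory");
#else
    __asm__ __volatile__ ("lock; addl $0, (%%esp)" ::: "memory", "cc");
#endif
}
```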
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp
new file mode 100644
index 0000000000..774354fb7f
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/interlocked.hpp
@@ -0,0 +1,522 @@
+#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
+#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2012 - 2014, 2017 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(_WIN32_WCE)
+
+#if _WIN32_WCE >= 0x600
+
+extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
+extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
+extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+
+#else // _WIN32_WCE >= 0x600
+
+extern "C" long __cdecl InterlockedCompareExchange( long*, long, long );
+extern "C" long __cdecl InterlockedExchangeAdd( long*, long );
+extern "C" long __cdecl InterlockedExchange( long*, long );
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+
+#endif // _WIN32_WCE >= 0x600
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
+
+#elif defined(_MSC_VER) && _MSC_VER >= 1310
+
+#if _MSC_VER < 1400
+
+extern "C" long __cdecl _InterlockedCompareExchange( long volatile *, long, long );
+extern "C" long __cdecl _InterlockedExchangeAdd( long volatile *, long );
+extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
+
+#else // _MSC_VER < 1400
+
+#include <intrin.h>
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#pragma intrinsic(_InterlockedXor)
+#pragma intrinsic(_interlockedbittestandset)
+#pragma intrinsic(_interlockedbittestandreset)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS(dest, arg) _interlockedbittestandset((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR(dest, arg) _interlockedbittestandreset((long*)(dest), (long)(arg))
+
+#if defined(_M_AMD64)
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandset64)
+#pragma intrinsic(_interlockedbittestandreset64)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTS64(dest, arg) _interlockedbittestandset64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR64(dest, arg) _interlockedbittestandreset64((__int64*)(dest), (__int64)(arg))
+#endif // defined(_M_AMD64)
+
+#if (defined(_M_IX86) && _M_IX86 >= 500) || defined(_M_AMD64) || defined(_M_IA64)
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#endif
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#endif
+
+#if _MSC_VER >= 1500 && defined(_M_AMD64)
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange128)
+#endif
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
+#endif
+
+#if _MSC_VER >= 1600
+
+// MSVC 2010 and later provide intrinsics for 8- and 16-bit integers.
+// Note that for each bit count these macros must be either all defined or all not defined.
+// Otherwise atomic<> operations will be implemented inconsistently.
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange8)
+#pragma intrinsic(_InterlockedExchangeAdd8)
+#pragma intrinsic(_InterlockedExchange8)
+#pragma intrinsic(_InterlockedAnd8)
+#pragma intrinsic(_InterlockedOr8)
+#pragma intrinsic(_InterlockedXor8)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange16)
+#pragma intrinsic(_InterlockedExchangeAdd16)
+#pragma intrinsic(_InterlockedExchange16)
+#pragma intrinsic(_InterlockedAnd16)
+#pragma intrinsic(_InterlockedOr16)
+#pragma intrinsic(_InterlockedXor16)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
+
+#endif // _MSC_VER >= 1600
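The all-or-nothing rule stated above is what lets consuming code feature-test a single representative macro per width and then use the whole family. A hypothetical consumer might look like this (illustrative, not library code):

```cpp
// Sketch of a downstream feature test: if the 16-bit compare-exchange macro
// exists, the note above guarantees the rest of the 16-bit family does too.
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
inline short fetch_add16(short volatile* p, short v)
{
    return BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(p, v);
}
#endif
```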
+
+#if defined(_M_AMD64) || defined(_M_IA64)
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedXor64)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#pragma intrinsic(_InterlockedExchangePointer)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((__int64*)(dest), byte_offset))
+
+#elif defined(_M_IX86)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
+
+#endif
+
+#if _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedXor64)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedCompareExchange8_nf)
+#pragma intrinsic(_InterlockedCompareExchange8_acq)
+#pragma intrinsic(_InterlockedCompareExchange8_rel)
+#pragma intrinsic(_InterlockedCompareExchange16_nf)
+#pragma intrinsic(_InterlockedCompareExchange16_acq)
+#pragma intrinsic(_InterlockedCompareExchange16_rel)
+#pragma intrinsic(_InterlockedCompareExchange_nf)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#pragma intrinsic(_InterlockedCompareExchange64_nf)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#pragma intrinsic(_InterlockedCompareExchangePointer_nf)
+#pragma intrinsic(_InterlockedCompareExchangePointer_acq)
+#pragma intrinsic(_InterlockedCompareExchangePointer_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedExchangeAdd8_nf)
+#pragma intrinsic(_InterlockedExchangeAdd8_acq)
+#pragma intrinsic(_InterlockedExchangeAdd8_rel)
+#pragma intrinsic(_InterlockedExchangeAdd16_nf)
+#pragma intrinsic(_InterlockedExchangeAdd16_acq)
+#pragma intrinsic(_InterlockedExchangeAdd16_rel)
+#pragma intrinsic(_InterlockedExchangeAdd_nf)
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedExchangeAdd_rel)
+#pragma intrinsic(_InterlockedExchangeAdd64_nf)
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedExchangeAdd64_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) _InterlockedExchangeAdd_nf((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend))
+
+#if defined(_M_ARM64)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((__int64*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED((__int64*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE((__int64*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE((__int64*)(dest), byte_offset))
+#else
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset))
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedExchange8_nf)
+#pragma intrinsic(_InterlockedExchange8_acq)
+#pragma intrinsic(_InterlockedExchange16_nf)
+#pragma intrinsic(_InterlockedExchange16_acq)
+#pragma intrinsic(_InterlockedExchange_nf)
+#pragma intrinsic(_InterlockedExchange_acq)
+#pragma intrinsic(_InterlockedExchange64_nf)
+#pragma intrinsic(_InterlockedExchange64_acq)
+#pragma intrinsic(_InterlockedExchangePointer)
+#pragma intrinsic(_InterlockedExchangePointer_nf)
+#pragma intrinsic(_InterlockedExchangePointer_acq)
+#if _MSC_VER >= 1800
+#pragma intrinsic(_InterlockedExchange8_rel)
+#pragma intrinsic(_InterlockedExchange16_rel)
+#pragma intrinsic(_InterlockedExchange_rel)
+#pragma intrinsic(_InterlockedExchange64_rel)
+#pragma intrinsic(_InterlockedExchangePointer_rel)
+#endif
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), (void*)(newval))
+
+#if _MSC_VER >= 1800
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval))
+#else
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedAnd8_nf)
+#pragma intrinsic(_InterlockedAnd8_acq)
+#pragma intrinsic(_InterlockedAnd8_rel)
+#pragma intrinsic(_InterlockedAnd16_nf)
+#pragma intrinsic(_InterlockedAnd16_acq)
+#pragma intrinsic(_InterlockedAnd16_rel)
+#pragma intrinsic(_InterlockedAnd_nf)
+#pragma intrinsic(_InterlockedAnd_acq)
+#pragma intrinsic(_InterlockedAnd_rel)
+#pragma intrinsic(_InterlockedAnd64_nf)
+#pragma intrinsic(_InterlockedAnd64_acq)
+#pragma intrinsic(_InterlockedAnd64_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) _InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedOr8_nf)
+#pragma intrinsic(_InterlockedOr8_acq)
+#pragma intrinsic(_InterlockedOr8_rel)
+#pragma intrinsic(_InterlockedOr16_nf)
+#pragma intrinsic(_InterlockedOr16_acq)
+#pragma intrinsic(_InterlockedOr16_rel)
+#pragma intrinsic(_InterlockedOr_nf)
+#pragma intrinsic(_InterlockedOr_acq)
+#pragma intrinsic(_InterlockedOr_rel)
+#pragma intrinsic(_InterlockedOr64_nf)
+#pragma intrinsic(_InterlockedOr64_acq)
+#pragma intrinsic(_InterlockedOr64_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_InterlockedXor8_nf)
+#pragma intrinsic(_InterlockedXor8_acq)
+#pragma intrinsic(_InterlockedXor8_rel)
+#pragma intrinsic(_InterlockedXor16_nf)
+#pragma intrinsic(_InterlockedXor16_acq)
+#pragma intrinsic(_InterlockedXor16_rel)
+#pragma intrinsic(_InterlockedXor_nf)
+#pragma intrinsic(_InterlockedXor_acq)
+#pragma intrinsic(_InterlockedXor_rel)
+#pragma intrinsic(_InterlockedXor64_nf)
+#pragma intrinsic(_InterlockedXor64_acq)
+#pragma intrinsic(_InterlockedXor64_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandset_nf)
+#pragma intrinsic(_interlockedbittestandset_acq)
+#pragma intrinsic(_interlockedbittestandset_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(dest, arg) _interlockedbittestandset_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(dest, arg) _interlockedbittestandset_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(dest, arg) _interlockedbittestandset_rel((long*)(dest), (long)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandreset_nf)
+#pragma intrinsic(_interlockedbittestandreset_acq)
+#pragma intrinsic(_interlockedbittestandreset_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(dest, arg) _interlockedbittestandreset_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(dest, arg) _interlockedbittestandreset_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(dest, arg) _interlockedbittestandreset_rel((long*)(dest), (long)(arg))
+
+#endif // _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64))
+
+#endif // _MSC_VER < 1400
+
+#else // defined(_MSC_VER) && _MSC_VER >= 1310
+
+#if defined(BOOST_USE_WINDOWS_H)
+
+#include <windows.h>
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
+
+#if defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+#else // defined(BOOST_USE_WINDOWS_H)
+
+#if defined(__MINGW64__)
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT
+#else
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+extern "C" {
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
+
+#if defined(_WIN64)
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+} // extern "C"
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
+
+#endif // defined(BOOST_USE_WINDOWS_H)
+
+#endif // defined(_MSC_VER)
+
+#endif
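What these macros buy the rest of the library is a uniform CAS primitive; operations with no dedicated intrinsic are then synthesized with a retry loop. A hedged sketch of that pattern, using a hypothetical helper and assuming the 32-bit macro is defined for the target:

```cpp
// Sketch: fetch_or for 32-bit values built on the CAS macro above.
// InterlockedCompareExchange semantics: returns the value observed in *dest.
inline long fetch_or_long(long volatile* dest, long bits)
{
    long old_val = *dest;
    for (;;)
    {
        long prev = BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, old_val | bits, old_val);
        if (prev == old_val)
            return prev;   // CAS succeeded; prev holds the pre-update value
        old_val = prev;    // lost the race: retry with the freshly observed value
    }
}
```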
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp
new file mode 100644
index 0000000000..e2e18aa384
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_cas_based.hpp
@@ -0,0 +1,107 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_cas_based.hpp
+ *
+ * This header contains CAS-based implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct cas_based_exchange :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+};
+
+template< typename Base >
+struct cas_based_operations :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Base::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::store(storage, (storage_type)0, order);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
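The idiom cas_based_operations encodes is the standard one for deriving every read-modify-write from compare-exchange. For comparison, the same loop written against std::atomic:

```cpp
#include <atomic>

// Standalone restatement of the fetch_add loop above using std::atomic.
// compare_exchange_weak reloads old_val on failure, so the loop body is
// empty, mirroring the "while (...) {}" form in the template.
template< typename T >
T fetch_add_via_cas(std::atomic< T >& a, T v, std::memory_order order)
{
    T old_val = a.load(std::memory_order_relaxed);
    while (!a.compare_exchange_weak(old_val, old_val + v, order, std::memory_order_relaxed)) {}
    return old_val;
}
```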
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp
new file mode 100644
index 0000000000..5f197cea48
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_extending_cas_based.hpp
@@ -0,0 +1,69 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_extending_cas_based.hpp
+ *
+ * This header contains a boilerplate of the \c operations template implementation that requires sign/zero extension in arithmetic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extending_cas_based_operations :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+ typedef typename make_storage_type< Size >::type emulated_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ storage_type new_val;
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
+ }
+ while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ storage_type new_val;
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
+ }
+ while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return old_val;
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
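The extension step is what keeps the CAS loop convergent: arithmetic is done in the wide storage type, truncated to the emulated width, and then sign- or zero-extended back so the value written matches what a subsequent load can actually observe. A standalone illustration of the unsigned (zero-extending) case:

```cpp
#include <cstdint>

// Sketch: computing the new 32-bit storage value for an 8-bit unsigned add.
// Without the truncate-and-extend step, old_val + v could carry into the
// upper bits, and the next compare_exchange would be comparing against a
// value that can never appear in storage.
inline std::uint32_t extended_add_u8(std::uint32_t old_val, std::uint32_t v)
{
    std::uint8_t narrow = static_cast< std::uint8_t >(old_val + v); // emulated width
    return static_cast< std::uint32_t >(narrow);                    // zero-extend back
}
```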
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp
new file mode 100644
index 0000000000..85b1342982
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_alpha.hpp
@@ -0,0 +1,876 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_alpha.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
+ (HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.
+ */
+
+/*
+ NB: The most natural thing would be to write the increment/decrement
+ operators along the following lines:
+
+ __asm__ __volatile__
+ (
+ "1: ldl_l %0,%1 \n"
+ "addl %0,1,%0 \n"
+ "stl_c %0,%1 \n"
+ "beq %0,1b\n"
+ : "=&b" (tmp)
+ : "m" (value)
+ : "cc"
+ );
+
+ However, according to the comments on the HP website and matching
+ comments in the Linux kernel sources, this defeats branch prediction,
+ as the CPU assumes that backward branches are always taken; so we
+ instead copy the trick from the Linux kernel and introduce a forward
+ branch and back again.
+
+ I have, however, had a hard time measuring the difference between
+ the two versions in microbenchmarks -- I am leaving it in nevertheless,
+ as it apparently does not hurt either way.
+*/
+
+struct gcc_alpha_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("mb" ::: "memory");
+ }
+};
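Restated in portable terms, the three helpers above place an "mb" so that release-class orders fence before the access, acquire/consume-class orders fence after it, and sequentially consistent stores fence on both sides. A hedged, simplified sketch of that mapping with standard fences (illustrative only; the Alpha code emits "mb" directly):

```cpp
#include <atomic>

// Illustrative mapping of fence_before/fence_after_store onto standard
// fences for a store; each std::atomic_thread_fence stands in for one "mb".
inline void store_with_mb(volatile int& storage, int v, std::memory_order order)
{
    if (order == std::memory_order_release || order == std::memory_order_seq_cst)
        std::atomic_thread_fence(std::memory_order_seq_cst);  // fence_before
    storage = v;
    if (order == std::memory_order_seq_cst)
        std::atomic_thread_fence(std::memory_order_seq_cst);  // fence_after_store
}
```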
+
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_alpha_operations_base
+{
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %3, %1\n"
+ "ldl_l %0, %2\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success;
+ storage_type current;
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stl_c %1, %4\n" // storage = desired; desired = store succeeded
+ "mov %1, %3\n" // success = desired
+ "2:\n"
+ : "+&r" (expected), // %0
+ "+&r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ storage_type current, tmp;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %5, %1\n" // tmp = desired
+ "ldl_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stl_c %1, %4\n" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n" // if (tmp == 0) goto retry
+ "mov %1, %3\n" // success = tmp
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous\n"
+
+ : "+&r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage), // %4
+ "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "and %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "bis %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "xor %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "zapnot %1, #1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "zapnot %1, #1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "sextb %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "sextb %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "zapnot %1, #3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "zapnot %1, #3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "sextw %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldl_l %0, %2\n"
+ "subl %0, %3, %1\n"
+ "sextw %1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_alpha_operations_base
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %3, %1\n"
+ "ldq_l %0, %2\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success;
+ storage_type current;
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stq_c %1, %4\n" // storage = desired; desired = store succeeded
+ "mov %1, %3\n" // success = desired
+ "2:\n"
+ : "+&r" (expected), // %0
+ "+&r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ storage_type current, tmp;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "mov %5, %1\n" // tmp = desired
+ "ldq_l %2, %4\n" // current = *(&storage)
+ "cmpeq %2, %0, %3\n" // success = current == expected
+ "mov %2, %0\n" // expected = current
+ "beq %3, 2f\n" // if (success == 0) goto end
+ "stq_c %1, %4\n" // storage = tmp; tmp = store succeeded
+ "beq %1, 3f\n" // if (tmp == 0) goto retry
+ "mov %1, %3\n" // success = tmp
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous\n"
+
+ : "+&r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success) // %3
+ : "m" (storage), // %4
+ "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "addq %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "subq %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "and %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "bis %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldq_l %0, %2\n"
+ "xor %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), // %0
+ "=&r" (modified) // %1
+ : "m" (storage), // %2
+ "r" (v) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("mb" ::: "memory");
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp
new file mode 100644
index 0000000000..b32159536f
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm.hpp
@@ -0,0 +1,1397 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_arm.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// From the ARM Architecture Reference Manual for architecture v6:
+//
+// LDREX{<cond>} <Rd>, [<Rn>]
+// <Rd> Specifies the destination register for the memory word addressed by <Rn>.
+// <Rn> Specifies the register containing the address.
+//
+// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
+// <Rd> Specifies the destination register for the returned status value.
+// 0 if the operation updates memory
+// 1 if the operation fails to update memory
+// <Rm> Specifies the register containing the word to be stored to memory.
+// <Rn> Specifies the register containing the address.
+// Rd must not be the same register as Rm or Rn.
+//
+// ARM v7 is like ARM v6 plus:
+// There are half-word and byte versions of the LDREX and STREX instructions,
+// LDREXH, LDREXB, STREXH and STREXB.
+// There are also double-word versions, LDREXD and STREXD.
+// (Actually it looks like these are available from version 6k onwards.)
+// These instructions are used by the specializations below when the corresponding
+// BOOST_ATOMIC_DETAIL_ARM_HAS_LDREX*_STREX* capability macro is defined; where they
+// are absent, the byte and half-word operations fall back to the 32-bit implementation.
+// It appears an immediate offset can also be supplied with the address.
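+//
+// As an illustrative sketch (not a quote from the manual), the canonical
+// LDREX/STREX retry loop that the operations in this header are built around
+// looks like this:
+//
+//   1:
+//     ldrex %[original], %[storage]    @ load and mark the address exclusive
+//     ...                              @ compute the new value
+//     strex %[tmp], %[new], %[storage] @ try to store; tmp = 0 on success
+//     teq   %[tmp], #0
+//     bne   1b                         @ exclusivity was lost - retry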
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // load the original value
+ "strex %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (expected), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strex %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (expected), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 1u >::type storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ extended_storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
+ "strexb %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexbeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexb %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 2u >::type storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ extended_storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
+ "strexh %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexheq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexh %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+// Unlike 32-bit operations, for 64-bit loads and stores we must use ldrexd/strexd.
+// Any other instructions result in a non-atomic sequence of 32-bit accesses.
+// See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
+// Section A3.5.3 "Atomicity in the ARM architecture".
+
+// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values.
+// In order to pass the 64-bit operands to/from asm blocks, we use undocumented gcc feature:
+// the lower half (Rt) of the operand is accessible normally, via the numbered placeholder (e.g. %0),
+// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. %H0).
+// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/
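+//
+// For example (illustrative only, mirroring the asm blocks below): with a
+// 64-bit operand bound to placeholder %1, "ldrexd %1, %H1, [%3]" loads the
+// low 32 bits into Rt (written %1) and the high 32 bits into Rt2 (written
+// %H1) of the same register pair.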
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ exchange(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "ldrexd %1, %H1, [%2]\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original) // %1
+ : "r" (&storage) // %2
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // load the original value
+ "strexd %0, %2, %H2, [%3]\n" // store the replacement, tmp = store failed
+ "teq %0, #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original) // %1
+ : "r" (v), // %2
+ "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t tmp;
+ storage_type original, old_val = expected;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "cmp %1, %2\n" // flags = original.lo==old_val.lo
+ "ittt eq\n" // [hint that the following 3 instructions are conditional on flags.equal]
+ "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
+ "strexdeq %0, %4, %H4, [%3]\n" // if (flags.equal) *(&storage) = desired, tmp = store failed
+ "teqeq %0, #0\n" // if (flags.equal) flags = tmp==0
+ "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
+ "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "+r" (old_val) // %2
+ : "r" (&storage), // %3
+ "r" (desired) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ const uint32_t success = (uint32_t)old_val;
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t tmp;
+ storage_type original, old_val = expected;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "cmp %1, %2\n" // flags = original.lo==old_val.lo
+ "it eq\n" // [hint that the following instruction is conditional on flags.equal]
+ "cmpeq %H1, %H2\n" // if (flags.equal) flags = original.hi==old_val.hi
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexd %0, %4, %H4, [%3]\n" // *(&storage) = desired, tmp = store failed
+ "teq %0, #0\n" // flags.equal = tmp == 0
+ "bne 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ "ite eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "moveq %2, #1\n" // if (flags.equal) old_val.lo = 1
+ "movne %2, #0\n" // if (!flags.equal) old_val.lo = 0
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "+r" (old_val) // %2
+ : "r" (&storage), // %3
+ "r" (desired) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ const uint32_t success = (uint32_t)old_val;
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = original;
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "adds %2, %1, %4\n" // result = original + value
+ "adc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "subs %2, %1, %4\n" // result = original - value
+ "sbc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "and %2, %1, %4\n" // result = original & value
+ "and %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "orr %2, %1, %4\n" // result = original | value
+ "orr %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "eor %2, %1, %4\n" // result = original ^ value
+ "eor %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ gcc_arm_operations_base::hardware_full_fence();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp
new file mode 100644
index 0000000000..73c04ffe15
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_arm_common.hpp
@@ -0,0 +1,134 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_arm_common.hpp
+ *
+ * This header contains basic utilities for the gcc ARM backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+//
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. (Actually, "Thumb 2" first appeared in the v6T2 extension of v6,
+// but its architecture manual is no longer available and simply refers readers to v7.)
+// So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+// A note about memory_order_consume. Technically, this architecture makes it possible to avoid
+// the unnecessary memory barrier after a consume load, since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in the user's code, which we have no
+// control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
+#else
+// The tmpreg may be wasted in this case, which is non-optimal.
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
+#endif
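+
+// Illustrative sketch (an assumption, not part of the original source): a backend
+// wraps its asm block with these macros, passing a scratch register for the
+// mode-switching branches, roughly as follows:
+//
+// uint32_t tmp;
+// __asm__ __volatile__
+// (
+//     BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+//     /* the LL/SC or barrier sequence, assembled as ARM code */
+//     BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+//     : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp)
+//     :
+//     : "memory"
+// );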
+
+struct gcc_arm_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
+ // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
+ // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
+ // Since we cannot detect the binutils version at compile time, we always have to use this hack.
+ __asm__ __volatile__
+ (
+#if defined(__thumb2__)
+ ".short 0xF3BF, 0x8F5B\n" // dmb ish
+#else
+ ".word 0xF57FF05B\n" // dmb ish
+#endif
+ :
+ :
+ : "memory"
+ );
+#else
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "mcr\tp15, 0, r0, c7, c10, 5\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : "=&l" (tmp)
+ :
+ : "memory"
+ );
+#endif
+ }
+};
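+
+// Illustrative note (an assumption, not part of the original source): derived
+// backends bracket their ldrex/strex sequences with the helpers above, e.g.:
+//
+// fence_before(order);  // full fence when the order includes release semantics
+// /* ldrex/strex retry loop */
+// fence_after(order);   // full fence when the order includes acquire/consume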
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp
new file mode 100644
index 0000000000..ce40e3b2b9
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_atomic.hpp
@@ -0,0 +1,392 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_atomic.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
+#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+
+#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
+ __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
+ __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
+ __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
+// There are platforms where we need to use larger storage types
+#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__INTEL_COMPILER)
+// This is used to suppress warning #32013 described below for Intel Compiler.
+// In debug builds the compiler does not inline any functions, so basically
+// every atomic function call results in this warning. I don't know any other
+// way to selectively disable just this one warning.
+#pragma system_header
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*!
+ * The function converts \c boost::memory_order values to the compiler-specific constants.
+ *
+ * NOTE: The intention is that the function is optimized away by the compiler, and the
+ * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
+ * work in this case because the standard atomics interface requires memory ordering
+ * constants to be passed as function arguments, at which point they stop being constexpr.
+ * However, it is crucial that the compiler sees constants and not runtime values,
+ * because otherwise it just ignores the ordering value and always uses seq_cst.
+ * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
+ * gcc 4.8.2. Intel Compiler issues a warning in this case:
+ *
+ * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
+ *
+ * while gcc acts silently.
+ *
+ * To mitigate the problem ALL functions, including the atomic<> members, must be
+ * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
+ * all functions are called with constant orderings and invoke the intrinsics properly.
+ *
+ * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
+ * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
+ * all atomic operations will be executed with seq_cst semantics.
+ */
+BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
+{
+ return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
+ (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
+ (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
+}
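+
+// Example of the intended usage (illustrative only): callers pass a compile-time
+// constant ordering, so that after forced inlining the intrinsic receives a
+// constant. For instance,
+//
+// __atomic_store_n(&storage, v, convert_memory_order_to_gcc(memory_order_release));
+//
+// folds to __atomic_store_n(&storage, v, __ATOMIC_RELEASE).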
+
+template< std::size_t Size, bool Signed >
+struct gcc_atomic_operations
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+
+ // Note: In the current implementation, gcc_atomic_operations is used only when the __atomic
+ // intrinsics for the particular size are always lock-free (i.e. the corresponding LOCK_FREE macro is 2).
+ // Therefore it is safe to always set is_always_lock_free to true here.
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, false,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return __atomic_compare_exchange_n
+ (
+ &storage, &expected, desired, true,
+ atomics::detail::convert_memory_order_to_gcc(success_order),
+ atomics::detail::convert_memory_order_to_gcc(failure_order)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
+ }
+};
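+
+// Note (illustrative, an assumption): the specializations below select a backend per
+// storage size. On a typical x86-64 target, for example, operations< 4u, Signed >
+// inherits gcc_atomic_operations< 4u, Signed > directly, while 16-byte storage may
+// fall back to cmpxchg16b through cas_based_operations, as chosen by the #if branches.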
+
+#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
+#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
+// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+// A similar problem exists with gcc 7 as well, as it requires linking with libatomic to use 16-byte intrinsics:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< gcc_dcas_x86_64< Signed > >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+};
+
+#else
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public gcc_atomic_operations< 16u, Signed >
+{
+};
+
+#endif
+#endif
+
+
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< gcc_dcas_x86< Signed > >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+};
+
+#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
+
+#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
+{
+};
+
+#else
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_atomic_operations< 8u, Signed >
+{
+};
+
+#endif
+#endif
+
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
+
+#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED
+
+#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
+{
+};
+
+#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
+{
+};
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
+
+#else
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_atomic_operations< 4u, Signed >
+{
+};
+
+#endif
+#endif
+
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)
+
+#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED
+
+#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
+{
+};
+
+#else
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
+{
+};
+
+#endif
+
+#else
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_atomic_operations< 2u, Signed >
+{
+};
+
+#endif
+#endif
+
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
+ (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
+ (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
+ (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)
+
+#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
+{
+};
+
+#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
+{
+};
+
+#else
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
+{
+};
+
+#endif
+
+#else
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_atomic_operations< 1u, Signed >
+{
+};
+
+#endif
+#endif
+
+#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
+#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
+#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp
new file mode 100644
index 0000000000..a826736d17
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc.hpp
@@ -0,0 +1,1232 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_ppc.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// The implementation below uses information from this document:
+// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html
+
+/*
+ Refer to: Motorola: "Programming Environments Manual for 32-Bit
+ Implementations of the PowerPC Architecture", Appendix E:
+ "Synchronization Programming Examples" for an explanation of what is
+ going on here (can be found on the web at various places by the
+ name "MPCFPE32B.pdf", Google is your friend...)
+
+ Most of the atomic operations map to instructions in a relatively
+ straightforward fashion, but "load"s may at first glance appear
+ a bit strange as they map to:
+
+ lwz %rX, addr
+ cmpw %rX, %rX
+ bne- 1f
+ 1:
+
+ That is, the CPU is forced to perform a branch that "formally" depends
+ on the value retrieved from memory. This scheme has an overhead of
+ about 1-2 clock cycles per load, but it allows mapping "acquire" to
+ the "isync" instruction instead of "sync" uniformly for all types
+ of atomic operations. Since "isync" has a cost of about 15 clock
+ cycles, while "sync" has a cost of about 50 clock cycles, the small
+ penalty to atomic loads more than compensates for this.
+
+ Byte- and halfword-sized atomic values are implemented in two ways.
+ When 8- and 16-bit instructions are available (on Power8 and later),
+ they are used. Otherwise operations are realized by encoding the
+ value to be represented into a word, performing sign/zero extension
+ as appropriate. This means that after add/sub operations the value
+ needs fixing up to accurately preserve the wrap-around semantics of
+ the smaller type; see the illustrative note after this comment.
+ (Nothing special needs to be done for the bit-wise
+ and the "exchange type" operators as the compiler already sees to
+ it that values carried in registers are extended appropriately and
+ everything falls into place naturally).
+
+ The register constraint "b" instructs gcc to use any register
+ except r0; this is sometimes required because the encoding for
+ r0 is used to signify "constant zero" in a number of instructions,
+ making r0 unusable in this place. For simplicity this constraint
+ is used everywhere since I am to lazy to look this up on a
+ per-instruction basis, and ppc has enough registers for this not
+ to pose a problem.
+*/
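+
+// Illustrative example of the fix-up mentioned above: with 8-bit unsigned values kept
+// in a 32-bit word, 0xFF + 1 yields 0x100 in the register; the "rlwinm %1, %1, 0, 0xff"
+// instruction masks it back to 0x00, preserving the wrap-around of the 8-bit type.
+// The signed variants sign-extend with "extsb"/"extsh" instead.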
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "stw %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "lwz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lwz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y1\n\t"
+ "stwcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "lwarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stwcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: lwarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stwcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 1u >::type storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "stb %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "lbz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lbz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y1\n\t"
+ "stbcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "lbarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stbcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: lbarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stbcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+template< >
+struct operations< 1u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 1u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "extsb %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "extsb %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 2u >::type storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "sth %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "lhz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lhz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y1\n\t"
+ "sthcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "lharx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "sthcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: lharx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "sthcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< >
+struct operations< 2u, false > :
+ public operations< 4u, false >
+{
+ typedef operations< 4u, false > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xffff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xffff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+template< >
+struct operations< 2u, true > :
+ public operations< 4u, true >
+{
+ typedef operations< 4u, true > base_type;
+ typedef base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "extsh %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "extsh %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "std %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ {
+ __asm__ __volatile__
+ (
+ "ld %0, %1\n\t"
+ "cmpd %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&b" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "ld %0, %1\n\t"
+ : "=&b" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y1\n\t"
+ "stdcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "ldarx %0,%y2\n\t"
+ "cmpd %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stdcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: ldarx %0,%y2\n\t"
+ "cmpd %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stdcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" (expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ {
+#if defined(__powerpc64__) || defined(__PPC64__)
+ if (order != memory_order_seq_cst)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ else
+ __asm__ __volatile__ ("sync" ::: "memory");
+#else
+ __asm__ __volatile__ ("sync" ::: "memory");
+#endif
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+#if defined(__ibmxl__) || defined(__IBMCPP__)
+ __fence();
+#else
+ __asm__ __volatile__ ("" ::: "memory");
+#endif
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp
new file mode 100644
index 0000000000..e5c9303bf7
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_ppc_common.hpp
@@ -0,0 +1,70 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_ppc_common.hpp
+ *
+ * This header contains basic utilities for gcc PowerPC backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// The implementation below uses information from this document:
+// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html
+
+// A note about memory_order_consume. Technically, this architecture makes it possible to avoid
+// the unnecessary memory barrier after a consume load, since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in the user's code, which we have no
+// control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+struct gcc_ppc_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__powerpc64__) || defined(__PPC64__)
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+#else
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("sync" ::: "memory");
+#endif
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("isync" ::: "memory");
+ }
+};
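+
+// Illustrative note (an assumption, not part of the original source): with these
+// helpers a release store becomes "lwsync; st..." on 64-bit targets ("sync; st..." on
+// 32-bit ones), and acquire/consume operations are followed by "isync", matching the
+// mapping described in ops_gcc_ppc.hpp.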
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp
new file mode 100644
index 0000000000..19b9b1fa87
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sparc.hpp
@@ -0,0 +1,240 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2010 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sparc.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sparc_cas_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ }
+};
+
+template< bool Signed >
+struct gcc_sparc_cas32 :
+ public gcc_sparc_cas_base
+{
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ storage_type previous = expected;
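+ // cas compares the word at [%1] with %2 and, if they match, stores %0 there;
+ // %0 always receives the previous value from memory.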
+ __asm__ __volatile__
+ (
+ "cas [%1], %2, %0"
+ : "+r" (desired)
+ : "r" (&storage), "r" (previous)
+ : "memory"
+ );
+ const bool success = (desired == previous);
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = desired;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "swap [%1], %0"
+ : "+r" (v)
+ : "r" (&storage)
+ : "memory"
+ );
+ fence_after(order);
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public cas_based_operations< gcc_sparc_cas32< Signed > >
+{
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct gcc_sparc_cas64 :
+ public gcc_sparc_cas_base
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ storage_type previous = expected;
+ __asm__ __volatile__
+ (
+ "casx [%1], %2, %0"
+ : "+r" (desired)
+ : "r" (&storage), "r" (previous)
+ : "memory"
+ );
+ const bool success = (desired == previous);
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = desired;
+ return success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< cas_based_exchange< gcc_sparc_cas64< Signed > > >
+{
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ switch (order)
+ {
+ case memory_order_release:
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+ break;
+ case memory_order_acq_rel:
+ __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
+ break;
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("membar #Sync" ::: "memory");
+ break;
+ case memory_order_relaxed:
+ default:
+ break;
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
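
A note on the SPARC backend above: only compare-and-swap (cas/casx) and the 32-bit
swap instruction are implemented natively; every other read-modify-write operation is
synthesized by cas_based_operations from a CAS retry loop. The following is a minimal
sketch of that loop, written against std::atomic rather than Boost's internal storage
types (the function name is illustrative, not Boost's):

#include <atomic>
#include <cstdint>

// Generic CAS loop of the kind cas_based_operations uses to build fetch_add.
// On SPARC the compare_exchange below ultimately compiles to the cas instruction.
std::uint32_t fetch_add_via_cas(std::atomic<std::uint32_t>& storage, std::uint32_t v)
{
    std::uint32_t old_val = storage.load(std::memory_order_relaxed);
    // Retry until no other thread modifies storage between the load and the CAS.
    while (!storage.compare_exchange_weak(old_val, old_val + v,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed))
    {
        // compare_exchange_weak refreshed old_val with the current value; retry.
    }
    return old_val; // the value observed before the addition, as fetch_add requires
}
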
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp
new file mode 100644
index 0000000000..1597de852a
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_sync.hpp
@@ -0,0 +1,240 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sync.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sync_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
+ __sync_synchronize();
+ }
+};
+
+template< std::size_t Size, bool Signed >
+struct gcc_sync_operations :
+ public gcc_sync_operations_base
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before_store(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_add(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_sub(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
+        // std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures turn out not to support it, we can always
+        // add a check here and fall back to a CAS loop.
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __sync_synchronize();
+ return __sync_lock_test_and_set(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type expected2 = expected;
+ storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);
+
+ if (old_val == expected2)
+ {
+ return true;
+ }
+ else
+ {
+ expected = old_val;
+ return false;
+ }
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_and(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_or(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return __sync_fetch_and_xor(&storage, v);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __sync_synchronize();
+ return !!__sync_lock_test_and_set(&storage, 1);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ __sync_lock_release(&storage);
+ if (order == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+};
+
+#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 1u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
+ public gcc_sync_operations< 1u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ public extending_cas_based_operations< gcc_sync_operations< 2u, Signed >, 1u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 1u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 1u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 1u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 2u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ public gcc_sync_operations< 2u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 2u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 2u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 2u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 4u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ public gcc_sync_operations< 4u, Signed >
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 4u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 4u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 8u, Signed > :
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ public gcc_sync_operations< 8u, Signed >
+#else
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 8u, Signed >
+#endif
+{
+};
+#endif
+
+#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public gcc_sync_operations< 16u, Signed >
+{
+};
+#endif
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __sync_synchronize();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
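
A note on the __sync backend above: GCC documents __sync_lock_test_and_set as an
acquire barrier only, which is why exchange() and test_and_set() issue an explicit
__sync_synchronize() first whenever release ordering is requested, and why clear()
pairs with __sync_lock_release. A minimal sketch of the classic use of this
acquire/release pairing, assuming any GCC-compatible compiler:

// A test-and-set spinlock built on the same __sync builtins the backend uses.
// __sync_lock_test_and_set acquires and __sync_lock_release releases, which is
// exactly the ordering a lock needs; no full barrier is required here.
static volatile int lock_word = 0;

void spin_lock()
{
    while (__sync_lock_test_and_set(&lock_word, 1) != 0)
    {
        // spin until the previous holder stores 0
    }
}

void spin_unlock()
{
    __sync_lock_release(&lock_word); // atomically stores 0 with release semantics
}
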
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp
new file mode 100644
index 0000000000..007d4eeeeb
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86.hpp
@@ -0,0 +1,563 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_x86.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_x86_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+template< std::size_t Size, bool Signed, typename Derived >
+struct gcc_x86_operations :
+ public gcc_x86_operations_base
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_seq_cst)
+ {
+ fence_before(order);
+ storage = v;
+ fence_after(order);
+ }
+ else
+ {
+ Derived::exchange(storage, v, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return Derived::fetch_add(storage, -v, order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_x86_operations< 1u, Signed, operations< 1u, Signed > >
+{
+ typedef gcc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgb %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%al, %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_x86_operations< 2u, Signed, operations< 2u, Signed > >
+{
+ typedef gcc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgw %0, %1"
+ : "+q" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%ax, %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public gcc_x86_operations< 4u, Signed, operations< 4u, Signed > >
+{
+ typedef gcc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgl %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[new_val]\n\t"\
+ op " %%eax, %[new_val]\n\t"\
+ "lock; cmpxchgl %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [arg] "ir" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< gcc_dcas_x86< Signed > >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+};
+
+#elif defined(__x86_64__)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public gcc_x86_operations< 8u, Signed, operations< 8u, Signed > >
+{
+ typedef gcc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xaddq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "xchgq %0, %1"
+ : "+r" (v), "+m" (storage)
+ :
+ : "memory"
+ );
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1\n\t"
+ "sete %2"
+ : "+a" (previous), "+m" (storage), "=q" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ expected = previous;
+ return success;
+ }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movq %[arg], %[new_val]\n\t"\
+ op " %%rax, %[new_val]\n\t"\
+ "lock; cmpxchgq %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [arg] "r" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< gcc_dcas_x86_64< Signed > >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order == memory_order_seq_cst)
+ {
+ __asm__ __volatile__
+ (
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
+ "mfence\n"
+#else
+ "lock; addl $0, (%%esp)\n"
+#endif
+ ::: "memory"
+ );
+ }
+ else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
+ {
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
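
A note on the x86 backend above: gcc_x86_operations::store routes memory_order_seq_cst
stores through Derived::exchange because a plain mov store on x86 provides only release
ordering; the implicitly locked xchg instruction is the conventional way to make the
store sequentially consistent. The same idea in isolation, mirroring the exchange()
member above (the function name is illustrative):

#include <cstdint>

// A sequentially consistent 32-bit store on x86. xchg with a memory operand
// carries an implicit lock prefix and acts as a full barrier, so no separate
// mfence is needed after the store.
inline void seq_cst_store(volatile std::uint32_t& storage, std::uint32_t v)
{
    __asm__ __volatile__
    (
        "xchgl %0, %1"
        : "+r" (v), "+m" (storage)
        :
        : "memory"
    );
}
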
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp
new file mode 100644
index 0000000000..4206bb39ef
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp
@@ -0,0 +1,556 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 - 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_x86_dcas.hpp
+ *
+ * This header contains the implementation of the double-width CAS primitive for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/string_ops.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// Note: In the 32-bit PIC code guarded with BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX below, we have to avoid using memory
+// operand constraints because the compiler may choose to use ebx as the base register for that operand; clang, at least,
+// is known to do that. For this reason we have to pre-compute a pointer to storage and pass it in edi. For the same reason
+// we cannot save ebx to the stack with a mov instruction, so we use esi as a scratch register and restore it afterwards.
+// Alternatively, we could push/pop the register to the stack, but exchanging the registers is faster.
+// The need to pass a pointer in edi is a bit wasteful because normally the memory operand would use a base pointer
+// with an offset (e.g. `this` + offset), but unfortunately there seems to be no way around it.
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct gcc_dcas_x86
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint32_t;
+
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
+ {
+#if defined(__SSE__)
+ typedef float xmm_t __attribute__((__vector_size__(16)));
+ xmm_t xmm_scratch;
+ __asm__ __volatile__
+ (
+#if defined(__AVX__)
+ "vmovq %[value], %[xmm_scratch]\n\t"
+ "vmovq %[xmm_scratch], %[storage]\n\t"
+#elif defined(__SSE2__)
+ "movq %[value], %[xmm_scratch]\n\t"
+ "movq %[xmm_scratch], %[storage]\n\t"
+#else
+ "xorps %[xmm_scratch], %[xmm_scratch]\n\t"
+ "movlps %[value], %[xmm_scratch]\n\t"
+ "movlps %[xmm_scratch], %[storage]\n\t"
+#endif
+ : [storage] "=m" (storage), [xmm_scratch] "=x" (xmm_scratch)
+ : [value] "m" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "fildll %[value]\n\t"
+ "fistpll %[storage]\n\t"
+ : [storage] "=m" (storage)
+ : [value] "m" (v)
+ : "memory"
+ );
+#endif
+ }
+ else
+ {
+#if defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %%esi\n\t"
+ "movl %%eax, %%ebx\n\t"
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
+ "jne 1b\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ :
+ : "a" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "edx", "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+ __asm__ __volatile__
+ (
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : [dest_lo] "=m" (storage), [dest_hi] "=m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : [value_lo] "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "eax", "edx", "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value;
+
+ if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
+ {
+#if defined(__SSE__)
+ typedef float xmm_t __attribute__((__vector_size__(16)));
+ xmm_t xmm_scratch;
+ __asm__ __volatile__
+ (
+#if defined(__AVX__)
+ "vmovq %[storage], %[xmm_scratch]\n\t"
+ "vmovq %[xmm_scratch], %[value]\n\t"
+#elif defined(__SSE2__)
+ "movq %[storage], %[xmm_scratch]\n\t"
+ "movq %[xmm_scratch], %[value]\n\t"
+#else
+ "xorps %[xmm_scratch], %[xmm_scratch]\n\t"
+ "movlps %[storage], %[xmm_scratch]\n\t"
+ "movlps %[xmm_scratch], %[value]\n\t"
+#endif
+ : [value] "=m" (value), [xmm_scratch] "=x" (xmm_scratch)
+ : [storage] "m" (storage)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "fildll %[storage]\n\t"
+ "fistpll %[value]\n\t"
+ : [value] "=m" (value)
+ : [storage] "m" (storage)
+ : "memory"
+ );
+#endif
+ }
+ else
+ {
+            // Note that, despite the const qualification, cmpxchg8b below may issue a store to the storage. The storage value
+            // will not change, but this prevents the storage from residing in read-only memory.
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint32_t value_bits[2];
+
+            // We don't care about the comparison result here; the previous value will be stored into value anyway.
+            // Nor do we care about the ebx and ecx values; they just have to be equal to eax and edx before cmpxchg8b.
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %%eax\n\t"
+ "movl %%ecx, %%edx\n\t"
+ "lock; cmpxchg8b %[storage]\n\t"
+ : "=&a" (value_bits[0]), "=&d" (value_bits[1])
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ BOOST_ATOMIC_DETAIL_MEMCPY(&value, value_bits, sizeof(value));
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+            // We don't care about the comparison result here; the previous value will be stored into value anyway.
+            // Nor do we care about the ebx and ecx values; they just have to be equal to eax and edx before cmpxchg8b.
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %%eax\n\t"
+ "movl %%ecx, %%edx\n\t"
+ "lock; cmpxchg8b %[storage]\n\t"
+ : "=&A" (value)
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ }
+
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+
+        // Clang cannot allocate eax:edx register pairs, but it has the __sync intrinsics
+ storage_type old_expected = expected;
+ expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
+ return expected == old_expected;
+
+#elif defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+
+ bool success;
+
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %%esi\n\t"
+ "lock; cmpxchg8b (%[dest])\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ : "+A" (expected), [success] "=@ccz" (success)
+ : "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %%esi\n\t"
+ "lock; cmpxchg8b (%[dest])\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ "sete %[success]\n\t"
+ : "+A" (expected), [success] "=qm" (success)
+ : "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
+ return success;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+
+ bool success;
+
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg8b %[dest]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), [success] "=@ccz" (success)
+ : "b" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg8b %[dest]\n\t"
+ "sete %[success]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), [success] "=qm" (success)
+ : "b" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
+ return success;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint32_t old_bits[2];
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %%esi\n\t"
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
+ "jne 1b\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ : "=a" (old_bits[0]), "=d" (old_bits[1])
+ : "S" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ storage_type old_value;
+ __asm__ __volatile__
+ (
+ "xchgl %%ebx, %%esi\n\t"
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
+ "jne 1b\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ : "=A" (old_value)
+ : "S" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return old_value;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+#if defined(__MINGW32__) && ((__GNUC__+0) * 100 + (__GNUC_MINOR__+0)) < 407
+
+ // MinGW gcc up to 4.6 has problems with allocating registers in the asm blocks below
+ uint32_t old_bits[2];
+ __asm__ __volatile__
+ (
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
+ "jne 1b\n\t"
+ : "=&a" (old_bits[0]), "=&d" (old_bits[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "DS" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint32_t old_bits[2];
+ __asm__ __volatile__
+ (
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : "=&a" (old_bits[0]), "=&d" (old_bits[1]), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ storage_type old_value;
+ __asm__ __volatile__
+ (
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : "=&A" (old_value), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return old_value;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct gcc_dcas_x86_64
+{
+ typedef typename make_storage_type< 16u >::type storage_type;
+ typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+ typedef uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint64_t;
+
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : [dest_lo] "=m" (storage), [dest_hi] "=m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "rax", "rdx", "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+        // Note that, despite the const qualification, cmpxchg16b below may issue a store to the storage. The storage value
+        // will not change, but this prevents the storage from residing in read-only memory.
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint64_t value_bits[2];
+
+        // We don't care about the comparison result here; the previous value will be stored into value anyway.
+        // Nor do we care about the rbx and rcx values; they just have to be equal to rax and rdx before cmpxchg16b.
+ __asm__ __volatile__
+ (
+ "movq %%rbx, %%rax\n\t"
+ "movq %%rcx, %%rdx\n\t"
+ "lock; cmpxchg16b %[storage]\n\t"
+ : "=&a" (value_bits[0]), "=&d" (value_bits[1])
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ storage_type value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&value, value_bits, sizeof(value));
+ return value;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ storage_type value;
+
+        // We don't care about the comparison result here; the previous value will be stored into value anyway.
+        // Nor do we care about the rbx and rcx values; they just have to be equal to rax and rdx before cmpxchg16b.
+ __asm__ __volatile__
+ (
+ "movq %%rbx, %%rax\n\t"
+ "movq %%rcx, %%rdx\n\t"
+ "lock; cmpxchg16b %[storage]\n\t"
+ : "=&A" (value)
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ return value;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+
+        // Clang cannot allocate rax:rdx register pairs, but it has the __sync intrinsics
+ storage_type old_expected = expected;
+ expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
+ return expected == old_expected;
+
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+        // Some compilers can't allocate the rax:rdx register pair either, but they also don't support the 128-bit __sync_val_compare_and_swap
+ bool success;
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg16b %[dest]\n\t"
+ "sete %[success]\n\t"
+ : [dest] "+m" (storage), "+a" (reinterpret_cast< aliasing_uint64_t* >(&expected)[0]), "+d" (reinterpret_cast< aliasing_uint64_t* >(&expected)[1]), [success] "=q" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ return success;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ bool success;
+
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg16b %[dest]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), "=@ccz" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg16b %[dest]\n\t"
+ "sete %[success]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), [success] "=qm" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
+ return success;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ uint64_t old_bits[2];
+ __asm__ __volatile__
+ (
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1]), "=&a" (old_bits[0]), "=&d" (old_bits[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ storage_type old_value;
+ __asm__ __volatile__
+ (
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : "=&A" (old_value), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+
+ return old_value;
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
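
A note on the DCAS backend above: both load() implementations exploit the fact that
cmpxchg8b/cmpxchg16b always write the current memory value back into eax:edx (rax:rdx)
whether or not the comparison succeeds, so a CAS against an arbitrary comparand acts as
a pure atomic load. The same trick expressed through a compiler intrinsic, assuming a
GCC-compatible 32-bit x86 target where the 8-byte builtin is available (the function
name is illustrative):

#include <cstdint>

// A 64-bit atomic load on 32-bit x86 via CAS. The builtin returns the previous
// value whether or not the swap happened, so CAS(p, 0, 0) reads atomically. As
// the comments above note, it may still issue a write cycle, so p must not point
// into read-only memory.
inline std::uint64_t load_via_cas(volatile std::uint64_t* p)
{
    return __sync_val_compare_and_swap(p, (std::uint64_t)0, (std::uint64_t)0);
}
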
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp
new file mode 100644
index 0000000000..16af1732cf
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_linux_arm.hpp
@@ -0,0 +1,180 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009, 2011 Helge Bahmann
+ * Copyright (c) 2009 Phil Endecott
+ * Copyright (c) 2013 Tim Blechmann
+ * Linux-specific code by Phil Endecott
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_linux_arm.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// Different ARM processors have different atomic instructions. In particular,
+// architecture versions before v6 (which are still in widespread use, e.g. the
+// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
+// On Linux the kernel provides some support that lets us abstract away from
+// these differences: it provides emulated CAS and barrier functions at special
+// addresses that are guaranteed not to be interrupted by the kernel. Using
+// this facility is slightly slower than inline assembler would be, but much
+// faster than a system call.
+//
+// While this emulated CAS is "strong" in the sense that it does not fail
+// "spuriously" (i.e.: it never fails to perform the exchange when the value
+// found equals the value expected), it does not return the found value on
+// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
+// return the found value on failure, and we have to manually load this value
+// after the emulated CAS reports failure. This in turn introduces a race
+// between the CAS failing (due to the "wrong" value being found) and subsequently
+// loading (which might turn up the "right" value). From an application's
+// point of view this looks like "spurious failure", and therefore the
+// emulated CAS is only good enough to provide compare_exchange_weak
+// semantics.
+
+struct linux_arm_cas_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ typedef void (*kernel_dmb_t)(void);
+ ((kernel_dmb_t)0xffff0fa0)();
+ }
+};
+
+template< bool Signed >
+struct linux_arm_cas :
+ public linux_arm_cas_base
+{
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before_store(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ while (true)
+ {
+ storage_type tmp = expected;
+ if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
+ return true;
+ if (tmp != expected)
+ {
+ expected = tmp;
+ return false;
+ }
+ }
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);
+
+ if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)
+ {
+ return true;
+ }
+ else
+ {
+ expected = storage;
+ return false;
+ }
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >
+{
+};
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ linux_arm_cas_base::hardware_full_fence();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
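
A note on the Linux ARM backend above: the kernel cmpxchg helper at the fixed address
0xffff0fc0 returns zero on success and nonzero on failure, and higher-level
read-modify-write operations are built on it the same way as on any weak CAS. A sketch
of fetch_add on top of that helper (valid only on ARM Linux with kuser helpers enabled;
the function name is illustrative):

// fetch_add via the Linux ARM kuser cmpxchg helper; returns the value observed
// before the addition.
typedef int (*kernel_cmpxchg32_t)(int oldval, int newval, volatile int* ptr);

inline int fetch_add_kuser(volatile int* p, int n)
{
    kernel_cmpxchg32_t kernel_cmpxchg = (kernel_cmpxchg32_t)0xffff0fc0;
    int old_val;
    do
    {
        old_val = *p; // plain read; the cmpxchg below validates it
    }
    while (kernel_cmpxchg(old_val, old_val + n, p) != 0);
    return old_val;
}
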
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp
new file mode 100644
index 0000000000..608c6fddf8
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_arm.hpp
@@ -0,0 +1,824 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_arm.hpp
+ *
+ * This header contains the implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <intrin.h>
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/type_traits/make_signed.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))
+#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// A note about memory_order_consume. Technically, this architecture makes it possible to avoid
+// an unnecessary memory barrier after a consume load, since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in the user's code, which we have no
+// control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
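+//
+// Illustrative consequence (not part of the implementation; g_a is a hypothetical
+// variable used only for this example): with the promotion in place, this backend
+// handles the following two loads identically:
+//
+//     boost::atomic< int > g_a;
+//     int x = g_a.load(boost::memory_order_consume); // treated as acquire
+//     int y = g_a.load(boost::memory_order_acquire);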
+
+struct msvc_arm_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
+ }
+
+ static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ hardware_full_fence();
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ // Combine order flags together and promote memory_order_consume to memory_order_acquire
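+        // This relies on Boost's flag-style memory_order encoding (an assumption of this
+        // note: consume == 1, acquire == 2, release == 4, seq_cst == 14): OR-ing the two
+        // orders combines their constraints, the mask clears the consume bit, and shifting
+        // that bit left by one turns it into the acquire bit.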
+ return static_cast< memory_order >(((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & ~static_cast< unsigned int >(memory_order_consume))
+ | (((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & static_cast< unsigned int >(memory_order_consume)) << 1u));
+ }
+};
+
+template< std::size_t Size, bool Signed, typename Derived >
+struct msvc_arm_operations :
+ public msvc_arm_operations_base
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ Derived::store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_arm_operations< 1u, Signed, operations< 1u, Signed > >
+{
+ typedef msvc_arm_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_arm_operations< 2u, Signed, operations< 2u, Signed > >
+{
+ typedef msvc_arm_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public msvc_arm_operations< 4u, Signed, operations< 4u, Signed > >
+{
+ typedef msvc_arm_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public msvc_arm_operations< 8u, Signed, operations< 8u, Signed > >
+{
+ typedef msvc_arm_operations< 8u, Signed, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before_store(order);
+ BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);
+ base_type::fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);
+ base_type::fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected, old_val;
+
+ switch (cas_common_order(success_order, failure_order))
+ {
+ case memory_order_relaxed:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));
+ break;
+ case memory_order_release:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
+ break;
+ }
+ expected = old_val;
+
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
+ break;
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ switch (order)
+ {
+ case memory_order_relaxed:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));
+ break;
+ case memory_order_release:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
+ break;
+ }
+ return v;
+ }
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order != memory_order_relaxed)
+ msvc_arm_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32
+#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE8
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE16
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
+#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp
new file mode 100644
index 0000000000..53628f3600
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_common.hpp
@@ -0,0 +1,38 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_common.hpp
+ *
+ * This header contains common tools for MSVC implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// Define compiler barriers
+#if defined(__INTEL_COMPILER)
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier()
+#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
+extern "C" void _ReadWriteBarrier(void);
+#pragma intrinsic(_ReadWriteBarrier)
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() _ReadWriteBarrier()
+#endif
+
+#ifndef BOOST_ATOMIC_DETAIL_COMPILER_BARRIER
+#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER()
+#endif
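+
+// Usage sketch (illustrative only; g_payload and g_ready are hypothetical globals):
+// the barrier constrains compiler reordering, not CPU reordering.
+//
+//     g_payload = compute();                    // the compiler may not reorder this store
+//     BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();   // across this point, but the CPU still
+//     g_ready = 1;                              // may reorder the two stores in hardware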
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp
new file mode 100644
index 0000000000..70b0ea994b
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_msvc_x86.hpp
@@ -0,0 +1,908 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_msvc_x86.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/type_traits/make_signed.hpp>
+#include <boost/atomic/capabilities.hpp>
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#include <boost/cstdint.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#endif
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// frame pointer register 'ebx' modified by inline assembly code. See the note below.
+#pragma warning(disable: 4731)
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
+extern "C" void _mm_mfence(void);
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_mm_mfence)
+#endif
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ * Implementation note for asm blocks.
+ *
+ * http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29
+ *
+ * Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.
+ * To be able to access both the local variables and the function parameters after the alignment, the compiler
+ * maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.
+ * If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX
+ * in asm code if the function requires dynamic stack alignment, as doing so could corrupt the frame pointer.
+ * Either move the eight-byte aligned types out of the function, or avoid using EBX.
+ *
+ * Since we have no way of knowing whether the compiler uses FPO, we always have to save and restore ebx
+ * whenever we have to clobber it. Additionally, we disable warning C4731 above so that the compiler
+ * doesn't warn about ebx use.
+ */
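+
+/*
+ * A sketch of the resulting pattern, as used by the asm blocks below (illustrative only):
+ *
+ *     uint32_t backup;
+ *     __asm
+ *     {
+ *         mov backup, ebx
+ *         // ... instructions that clobber ebx, e.g. lock cmpxchg8b ...
+ *         mov ebx, backup
+ *     };
+ */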
+
+struct msvc_x86_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
+ _mm_mfence();
+#else
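+        // No mfence available: a lock-prefixed RMW on a dummy location acts as a full memory fence on x86.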
+ long tmp;
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+#endif
+ }
+
+ static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+        // On x86 and x86_64 there is no need for a hardware barrier,
+        // even if seq_cst memory order is requested, because all
+        // seq_cst writes are implemented with lock-prefixed operations
+        // or xchg, which has an implied lock prefix. Therefore normal loads
+        // are already ordered with respect to seq_cst stores on these architectures.
+ }
+};
+
+template< std::size_t Size, bool Signed, typename Derived >
+struct msvc_x86_operations :
+ public msvc_x86_operations_base
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_seq_cst)
+ {
+ fence_before(order);
+ storage = v;
+ fence_after(order);
+ }
+ else
+ {
+ Derived::exchange(storage, v, order);
+ }
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after_load(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public msvc_x86_operations< 4u, Signed, operations< 4u, Signed > >
+{
+ typedef msvc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ }
+#else
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
+ return res;
+ }
+#endif
+};
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
+{
+ typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
+ }
+};
+
+#elif defined(_M_IX86)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
+{
+ typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xadd byte ptr [edx], al
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ xchg byte ptr [edx], al
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(success_order);
+ bool success;
+ __asm
+ {
+ mov esi, expected
+ mov edi, storage
+ movzx eax, byte ptr [esi]
+ movzx edx, desired
+ lock cmpxchg byte ptr [edi], dl
+ mov byte ptr [esi], al
+ sete success
+ };
+ // The success and failure fences are equivalent anyway
+ base_type::fence_after(success_order);
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ and dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ or dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ xor dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, al
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+};
+
+#else
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
+{
+ typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
+ }
+};
+
+#elif defined(_M_IX86)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
+{
+ typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xadd word ptr [edx], ax
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ xchg word ptr [edx], ax
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(success_order);
+ bool success;
+ __asm
+ {
+ mov esi, expected
+ mov edi, storage
+ movzx eax, word ptr [esi]
+ movzx edx, desired
+ lock cmpxchg word ptr [edi], dx
+ mov word ptr [esi], ax
+ sete success
+ };
+ // The success and failure fences are equivalent anyway
+ base_type::fence_after(success_order);
+ return success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ and dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ or dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ xor dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, ax
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+};
+
+#else
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+#endif
+
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+template< bool Signed >
+struct msvc_dcas_x86
+{
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+    // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
+    //
+    // The Pentium processor (and all newer processors) guarantees that the following additional memory operations will always be carried out atomically:
+    // * Reading or writing a quadword aligned on a 64-bit boundary
+    //
+    // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations
+    // have at least 8-byte alignment. The only unfortunate case is when an atomic<> is placed on the stack and is not 8-byte aligned (as can happen on 32-bit Windows).
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ storage_type volatile* p = &storage;
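+        // Clear low three address bits mean the location is 8-byte aligned, so a single
+        // 64-bit SSE or x87 move is atomic (see the note above); otherwise fall back to cmpxchg8b.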
+ if (((uint32_t)p & 0x00000007) == 0)
+ {
+#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
+#if defined(__AVX__)
+ __asm
+ {
+ mov edx, p
+ vmovq xmm4, v
+ vmovq qword ptr [edx], xmm4
+ };
+#else
+ __asm
+ {
+ mov edx, p
+ movq xmm4, v
+ movq qword ptr [edx], xmm4
+ };
+#endif
+#else
+ __asm
+ {
+ mov edx, p
+ fild v
+ fistp qword ptr [edx]
+ };
+#endif
+ }
+ else
+ {
+ uint32_t backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov ebx, dword ptr [v]
+ mov ecx, dword ptr [v + 4]
+ mov eax, dword ptr [edi]
+ mov edx, dword ptr [edi + 4]
+ align 16
+ again:
+ lock cmpxchg8b qword ptr [edi]
+ jne again
+ mov ebx, backup
+ };
+ }
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ storage_type const volatile* p = &storage;
+ storage_type value;
+
+ if (((uint32_t)p & 0x00000007) == 0)
+ {
+#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
+#if defined(__AVX__)
+ __asm
+ {
+ mov edx, p
+ vmovq xmm4, qword ptr [edx]
+ vmovq value, xmm4
+ };
+#else
+ __asm
+ {
+ mov edx, p
+ movq xmm4, qword ptr [edx]
+ movq value, xmm4
+ };
+#endif
+#else
+ __asm
+ {
+ mov edx, p
+ fild qword ptr [edx]
+ fistp value
+ };
+#endif
+ }
+ else
+ {
+            // We don't care about the comparison result here; the previous value will be stored into value anyway.
+            // Nor do we care about the ebx and ecx values; they just have to be equal to eax and edx before cmpxchg8b.
+ __asm
+ {
+ mov edi, p
+ mov eax, ebx
+ mov edx, ecx
+ lock cmpxchg8b qword ptr [edi]
+ mov dword ptr [value], eax
+ mov dword ptr [value + 4], edx
+ };
+ }
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+        // MSVC-11 in 32-bit mode sometimes generates incorrect code without explicit compiler barriers,
+        // even though the _InterlockedCompareExchange64 intrinsic already provides one.
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ storage_type volatile* p = &storage;
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+ const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
+ const bool result = (old_val == expected);
+ expected = old_val;
+#else
+ bool result;
+ uint32_t backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov esi, expected
+ mov ebx, dword ptr [desired]
+ mov ecx, dword ptr [desired + 4]
+ mov eax, dword ptr [esi]
+ mov edx, dword ptr [esi + 4]
+ lock cmpxchg8b qword ptr [edi]
+ mov dword ptr [esi], eax
+ mov dword ptr [esi + 4], edx
+ mov ebx, backup
+ sete result
+ };
+#endif
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ storage_type volatile* p = &storage;
+ uint32_t backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov ebx, dword ptr [v]
+ mov ecx, dword ptr [v + 4]
+ mov eax, dword ptr [edi]
+ mov edx, dword ptr [edi + 4]
+ align 16
+ again:
+ lock cmpxchg8b qword ptr [edi]
+ jne again
+ mov ebx, backup
+ mov dword ptr [v], eax
+ mov dword ptr [v + 4], edx
+ };
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ return v;
+ }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public cas_based_operations< msvc_dcas_x86< Signed > >
+{
+};
+
+#elif defined(_M_AMD64)
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+ public msvc_x86_operations< 8u, Signed, operations< 8u, Signed > >
+{
+ typedef msvc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
+ expected = old_val;
+ return (previous == old_val);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
+ }
+};
+
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct msvc_dcas_x86_64
+{
+ typedef typename make_storage_type< 16u >::type storage_type;
+ typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value = const_cast< storage_type& >(storage);
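+        // CAS in a loop; on failure the intrinsic refreshes "value" with the current
+        // contents, so each iteration retries with an up-to-date comparand.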
+ while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type value = storage_type();
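+        // A CAS whose comparand and exchange values are identical never visibly changes the
+        // storage but always deposits its current contents into "value", yielding an atomic 16-byte load.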
+ BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
+ return value;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+};
+
+template< bool Signed >
+struct operations< 16u, Signed > :
+ public cas_based_operations< cas_based_exchange< msvc_dcas_x86_64< Signed > > >
+{
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order == memory_order_seq_cst)
+ msvc_x86_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
diff --git a/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp
new file mode 100644
index 0000000000..d4ce6d95e7
--- /dev/null
+++ b/contrib/restricted/boost/atomic/include/boost/atomic/detail/ops_windows.hpp
@@ -0,0 +1,218 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2012 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_windows.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ *
+ * This implementation is the most basic version for Windows. It should
+ * work with any non-MSVC-like compiler as long as the Interlocked WinAPI
+ * functions are available. This version is also used for WinCE.
+ *
+ * Notably, this implementation is not as efficient as other
+ * versions based on compiler intrinsics.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/type_traits/make_signed.hpp>
+#include <boost/atomic/capabilities.hpp>
+#include <boost/atomic/detail/ops_msvc_common.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct windows_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
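+ // A locked exchange on a dummy variable acts as a full memory barrier on
+ // x86, presumably chosen here because intrinsics such as MemoryBarrier()
+ // cannot be assumed to exist on all targets this fallback supports.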
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+ long tmp;
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+ }
+
+ static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ }
+};
+
+template< std::size_t Size, bool Signed, typename Derived >
+struct windows_operations :
+ public windows_operations_base
+{
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
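+ // The Interlocked API offers no plain atomic store, so stores are carried
+ // out as exchanges with the result discarded.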
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Derived::exchange(storage, v, order);
+ }
+
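+ // Loads are performed as fetch_add(0): an atomic read-modify-write that
+ // leaves the stored value unchanged but returns its current contents.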
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return Derived::fetch_add(const_cast< storage_type volatile& >(storage), (storage_type)0, order);
+ }
+
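+ // Subtraction reuses fetch_add with the negated operand; routing the
+ // negation through the signed type yields the two's complement value.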
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
+ return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!Derived::exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
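+// The 4-byte specialization maps directly onto the classic Interlocked*
+// functions, which operate on 32-bit LONG operands.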
+template< bool Signed >
+struct operations< 4u, Signed > :
+ public windows_operations< 4u, Signed, operations< 4u, Signed > >
+{
+ typedef windows_operations< 4u, Signed, operations< 4u, Signed > > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ storage_type previous = expected;
+ base_type::fence_before(success_order);
+ storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
+ expected = old_val;
+ // The success and failure fences are the same anyway
+ base_type::fence_after(success_order);
+ return (previous == old_val);
+ }
+
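+ // When the corresponding Interlocked bitwise intrinsic is unavailable the
+ // operation falls back to a CAS loop; compare_exchange_strong reloads
+ // `res` with the observed value after each failed attempt. fetch_or and
+ // fetch_xor below follow the same pattern.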
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ base_type::fence_before(order);
+ v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
+ base_type::fence_after(order);
+ return v;
+#else
+ storage_type res = storage;
+ while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
+ return res;
+#endif
+ }
+};
+
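+// 8- and 16-bit operations are widened onto the 32-bit CAS above, since
+// the baseline WinAPI only guarantees Interlocked functions for 32-bit
+// operands.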
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
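+// As in the MSVC x86 backend, only memory_order_seq_cst needs a hardware
+// fence; the weaker orderings only have to restrain the compiler.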
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+ if (order == memory_order_seq_cst)
+ windows_operations_base::hardware_full_fence();
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+ if (order != memory_order_relaxed)
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_